def __init__(self, logger, config, name=None):
    self.logger = logger
    self.config = config

    if name:
        self.hostname = name
    else:
        self.hostname = this_host()
def reconfigure(self, attribute, value):
    controls = {attribute: value}
    controls_json = json.dumps(controls)

    ui_root = self.get_ui_root()
    disk = ui_root.disks.disk_lookup[self.image_id]
    if not disk.owner:
        self.logger.error("Cannot reconfigure until disk assigned to target")
        return

    local_gw = this_host()

    # Issue the api request for reconfigure
    disk_api = ('{}://localhost:{}/api/'
                'disk/{}'.format(self.http_mode,
                                 settings.config.api_port,
                                 self.image_id))

    api_vars = {'pool': self.pool,
                'owner': local_gw,
                'controls': controls_json,
                'mode': 'reconfigure'}

    self.logger.debug("Issuing reconfigure request: attribute={}, "
                      "value={}".format(attribute, value))
    api = APIRequest(disk_api, data=api_vars)
    api.put()

    if api.response.status_code == 200:
        self.logger.info('ok')
        self._refresh_config()
    else:
        self.logger.error("Failed to reconfigure : "
                          "{}".format(response_message(api.response,
                                                       self.logger)))
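# Illustrative sketch only (not part of the module): reconfigure() drives the
# local rbd-target-api 'disk' endpoint shown above. Assuming the API service
# is listening on its configured port with auth enabled, an equivalent
# request could be issued by hand; the port, pool/image and control name
# below are hypothetical:
#
#   curl --insecure --user admin:admin \
#        -d mode=reconfigure -d pool=rbd -d owner=ceph-gw-1 \
#        -d controls='{"max_data_area_mb": 128}' \
#        -X PUT https://localhost:5001/api/disk/rbd/disk_1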
def clearconfig():
    """
    Clear the LIO configuration of the settings defined by the config object.
    We could simply call the clear_existing method of rtsroot - but if the
    admin has defined additional non-ceph iscsi exports they'd lose
    everything.

    :return: (int) 0 = LIO configuration removed/not-required
                   4 = LUN removal problem encountered
                   8 = Gateway (target/tpgs) removal failed
    """
    local_gw = this_host()

    # clear the current config, based on the config object's settings
    lio = LIO()
    gw = Gateway(config)

    # This will fail incoming IO, but wait on outstanding IO to
    # complete normally. We rely on the initiator multipath layer
    # to handle retries like a normal path failure.
    logger.info("Removing iSCSI target from LIO")
    gw.drop_target(local_gw)
    if gw.error:
        logger.error("rbd-target-gw failed to remove target objects")
        return 8

    logger.info("Removing LUNs from LIO")
    lio.drop_lun_maps(config, False)
    if lio.error:
        logger.error("rbd-target-gw failed to remove LUN objects")
        return 4

    logger.info("Active Ceph iSCSI gateway configuration removed")
    return 0
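# Illustrative sketch only: how a caller (e.g. the signal handler below)
# might act on clearconfig()'s documented return codes:
#
#   rc = clearconfig()
#   if rc == 4:
#       logger.error("LUN removal problem encountered")
#   elif rc == 8:
#       logger.error("Gateway (target/tpg) removal failed")
#   sys.exit(rc)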
def signal_stop(*args):
    """
    Handler to shut down the service when systemd sends SIGTERM
    NB - args has to be specified since python will pass two params into
    the handler by default
    :param args: ignored/unused
    """
    logger.info("rbd-target-gw stop received, refreshing local state")
    config.refresh()
    if config.error:
        logger.critical("Problems accessing config object"
                        " - {}".format(config.error_msg))
        sys.exit(16)

    local_gw = this_host()

    if "gateways" in config.config:
        if local_gw not in config.config["gateways"]:
            logger.info("No gateway configuration to remove on this host "
                        "({})".format(local_gw))
            sys.exit(0)
    else:
        logger.info("Configuration object does not hold any gateway metadata"
                    " - nothing to do")
        sys.exit(0)

    rc = clearconfig()

    sys.exit(rc)
def manage_client(client_iqn):
    """
    Manage a client definition on the local gateway
    Internal Use ONLY
    :param client_iqn: iSCSI name for the client
    **RESTRICTED**
    """
    if request.method == 'GET':
        if client_iqn in config.config['clients']:
            return jsonify(config.config["clients"][client_iqn]), 200
        else:
            return jsonify(message="Client does not exist"), 404

    elif request.method == 'PUT':
        try:
            valid_iqn = normalize_wwn(['iqn'], client_iqn)
        except RTSLibError:
            return jsonify(message="'{}' is not a valid name for "
                                   "iSCSI".format(client_iqn)), 400

        committing_host = request.form['committing_host']

        image_list = request.form.get('image_list', '')
        chap = request.form.get('chap', '')

        status_code, status_text = _update_client(
            client_iqn=client_iqn,
            images=image_list,
            chap=chap,
            committing_host=committing_host)

        logger.debug("client create: {}".format(status_code))
        logger.debug("client create: {}".format(status_text))

        return jsonify(message=status_text), status_code

    else:
        # DELETE request
        committing_host = request.form['committing_host']

        # Make sure the delete request is for a client we have defined
        if client_iqn in config.config['clients'].keys():
            client = GWClient(logger, client_iqn, '', '')
            client.manage('absent', committer=committing_host)

            if client.error:
                logger.error("Failed to remove client : "
                             "{}".format(client.error_msg))
                return jsonify(message="Failed to remove client"), 500
            else:
                if committing_host == this_host():
                    config.refresh()

                return jsonify(message="Client deleted ok"), 200
        else:
            logger.error("Delete request for non-existent client!")
            return jsonify(message="Client does not exist!"), 404
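# Illustrative sketch only: this endpoint is flagged internal, but for
# reference its request shape matches the all_client examples further down
# in this API module - e.g. (host, port and credentials are assumptions):
#
#   curl --insecure --user admin:admin \
#        -d committing_host=ceph-gw-1 \
#        -X PUT https://localhost:5001/api/client/iqn.1994-05.com.redhat:myhost4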
def _get_gateway_name(self, ip):
    if ip in self.active_portal_ips:
        return this_host()

    target_config = self.config.config['targets'][self.iqn]
    for portal_name, portal_config in target_config['portals'].items():
        if ip in portal_config['portal_ip_addresses']:
            return portal_name

    return None
def apply_config():
    """
    Processing logic that orchestrates the creation of the iSCSI gateway
    to LIO.
    """
    # access config_loading from the outer scope, for r/w
    global config_loading
    config_loading = True

    local_gw = this_host()

    logger.info("Reading the configuration object to update local LIO "
                "configuration")

    # first check to see if we have any entries to handle - if not, there is
    # no work to do..
    if "gateways" not in config.config:
        logger.info("Configuration is empty - nothing to define to LIO")
        config_loading = False
        return
    if local_gw not in config.config['gateways']:
        logger.info("Configuration does not have an entry for this host({}) - "
                    "nothing to define to LIO".format(local_gw))
        config_loading = False
        return

    # at this point we have a gateway entry that applies to the running host
    portals_already_active = portals_active()

    logger.info("Processing Gateway configuration")
    gateway = define_gateway()

    logger.info("Processing LUN configuration")
    try:
        LUN.define_luns(logger, config, gateway)
    except CephiSCSIError as err:
        halt("Could not define LUNs: {}".format(err))

    logger.info("Processing client configuration")
    try:
        GWClient.define_clients(logger, config)
    except CephiSCSIError as err:
        halt("Could not define clients: {}".format(err))

    if not portals_already_active:
        # The tpgs, luns and clients are all defined, but the active tpg
        # doesn't have an IP bound to it yet (due to the
        # enable_portals=False setting above)
        logger.info("Adding the IP to the enabled tpg, allowing iSCSI logins")
        gateway.enable_active_tpg(config)
        if gateway.error:
            halt("Error enabling the IP with the active TPG: {}".format(
                gateway.error_msg))

    config_loading = False

    logger.info("iSCSI configuration load complete")
def delete_disk(self, image_id, preserve_image):
    all_disks = []
    for pool in self.children:
        for disk in pool.children:
            all_disks.append(disk)

    # Perform a quick 'sniff' test on the request
    if image_id not in [disk.image_id for disk in all_disks]:
        self.logger.error("Disk '{}' is not defined to the "
                          "configuration".format(image_id))
        return

    self.logger.debug("CMD: /disks delete {}".format(image_id))
    self.logger.debug("Starting delete for rbd {}".format(image_id))

    local_gw = this_host()

    api_vars = {
        'purge_host': local_gw,
        'preserve_image': 'true' if preserve_image else 'false'
    }

    disk_api = '{}://{}:{}/api/disk/{}'.format(self.http_mode,
                                               local_gw,
                                               settings.config.api_port,
                                               image_id)
    api = APIRequest(disk_api, data=api_vars)
    api.delete()

    if api.response.status_code == 200:
        self.logger.debug("- rbd removed from all gateways, and deleted")
        disk_object = [disk for disk in all_disks
                       if disk.image_id == image_id][0]
        pool, _ = image_id.split('/')
        pool_object = [pool_object for pool_object in self.children
                       if pool_object.name == pool][0]
        pool_object.remove_child(disk_object)
        if len(pool_object.children) == 0:
            self.remove_child(pool_object)
        del self.disk_info[image_id]
        del self.disk_lookup[image_id]
    else:
        self.logger.debug("delete request failed - "
                          "{}".format(api.response.status_code))
        self.logger.error("{}".format(response_message(api.response,
                                                       self.logger)))
        return

    ceph_pools = self.parent.ceph.cluster.pools
    ceph_pools.refresh()

    self.logger.info('ok')
def unmap_lun(self, target_iqn):
    local_gw = this_host()
    self.logger.info("LUN unmap request received, config commit to be "
                     "performed by {}".format(self.allocating_host))

    target_config = self.config.config['targets'][target_iqn]

    # First ensure the LUN is not in use
    clients = target_config['clients']
    for client_iqn in clients:
        client_luns = clients[client_iqn]['luns'].keys()
        if self.config_key in client_luns:
            self.error = True
            self.error_msg = ("Unable to delete {} - allocated to "
                              "{}".format(self.config_key, client_iqn))
            self.logger.warning(self.error_msg)
            return

    # Check that the LUN is in LIO - if not there is nothing to do for
    # this request
    lun = self.lio_stg_object()
    if not lun:
        return

    # Now we know the request is for a LUN in LIO, and it's not masked
    # to a client
    self.remove_dev_from_lio()
    if self.error:
        return

    if local_gw == self.allocating_host:
        # by using the allocating host we ensure the delete is not
        # issued by several hosts when initiated through ansible
        target_config['disks'].pop(self.config_key)
        self.config.update_item("targets", target_iqn, target_config)

        # determine which host was the path owner
        disk_owner = self.config.config['disks'][self.config_key]['owner']

        # update the active_luns count for the gateway that owned this
        # lun
        gw_metadata = self.config.config['gateways'][disk_owner]
        if gw_metadata['active_luns'] > 0:
            gw_metadata['active_luns'] -= 1

            self.config.update_item('gateways',
                                    disk_owner,
                                    gw_metadata)

        disk_metadata = self.config.config['disks'][self.config_key]
        if 'owner' in disk_metadata:
            del disk_metadata['owner']
            self.logger.debug("{} owner deleted".format(self.config_key))
        self.config.update_item("disks", self.config_key, disk_metadata)

        self.config.commit()
def resize(self, size):
    """
    Perform the resize operation, and sync the disk size across each of
    the gateways
    :param size: (str) new size for the rbd image, e.g. '10G'
    :return:
    """
    # resize is actually managed by the same lun and api endpoint as
    # create so this logic is very similar to a 'create' request
    size_rqst = size.upper()
    if not valid_size(size_rqst):
        self.logger.error("Size is invalid")
        return

    # At this point the size request needs to be honoured
    self.logger.debug("Resizing {} to {}".format(self.image_id,
                                                 size_rqst))

    local_gw = this_host()

    # Issue the api request for the resize
    disk_api = ('{}://localhost:{}/api/'
                'disk/{}'.format(self.http_mode,
                                 settings.config.api_port,
                                 self.image_id))

    api_vars = {'pool': self.pool,
                'size': size_rqst,
                'owner': local_gw,
                'mode': 'resize'}

    self.logger.debug("Issuing resize request")
    api = APIRequest(disk_api, data=api_vars)
    api.put()

    if api.response.status_code == 200:
        # at this point the resize request was successful, so we need to
        # update the ceph pool meta data (%commit etc)
        self._update_pool()
        self.size_h = size_rqst
        self.size = convert_2_bytes(size_rqst)

        self.logger.info('ok')
    else:
        self.logger.error("Failed to resize : "
                          "{}".format(response_message(api.response,
                                                       self.logger)))
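# Illustrative sketch only: resize() expects a human-readable capacity
# string (validated by valid_size() and converted by convert_2_bytes()),
# so CLI-level usage would look something like this, with 'disk' standing
# in for a Disk instance:
#
#   disk.resize('10G')     # accepted
#   disk.resize('512m')    # upper-cased to '512M' before validation
#   disk.resize('banana')  # rejected - "Size is invalid" is logged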
def logged_in(self):
    target_iqn = self.parent.parent.name
    gateways = self.parent.parent.get_child('gateways')
    local_gw = this_host()
    is_local_target = len([child for child in gateways.children
                           if child.name == local_gw]) > 0
    if is_local_target:
        client_info = GWClient.get_client_info(target_iqn,
                                               self.client_iqn)
        self.alias = client_info['alias']
        self.ip_address = ','.join(client_info['ip_address'])
        return client_info['state']
    else:
        self.alias = ''
        self.ip_address = ''
        return ''
def remove_lun(self, preserve_image):
    local_gw = this_host()
    self.logger.info("LUN deletion request received, rbd removal to be "
                     "performed by {}".format(self.allocating_host))

    # First ensure the LUN is not in use
    for target_iqn, target in self.config.config['targets'].items():
        if self.config_key in target['disks']:
            self.error = True
            self.error_msg = ("Unable to delete {} - allocated to "
                              "{}".format(self.config_key, target_iqn))
            self.logger.warning(self.error_msg)
            return

    # Check that the LUN is in LIO - if not there is nothing to do for
    # this request
    lun = self.lio_stg_object()
    if lun:
        # Now we know the request is for a LUN in LIO, and it's not
        # masked to a client
        self.remove_dev_from_lio()
        if self.error:
            return

    rbd_image = RBDDev(self.image, '0G', self.backstore, self.pool)

    if local_gw == self.allocating_host:
        # by using the allocating host we ensure the delete is not
        # issued by several hosts when initiated through ansible
        if not preserve_image:
            rbd_image.delete()
            if rbd_image.error:
                self.error = True
                self.error_msg = rbd_image.error_msg
                return

        # remove the definition from the config object
        self.config.del_item('disks', self.config_key)

        self.config.commit()
def get_remote_gateways(config, logger, local_gw_required=True):
    """
    Return the list of remote gws.

    :param config: Config object with gws setup.
    :param logger: Logger object
    :param local_gw_required: Check if local_gw is defined within gateways
                              configuration
    :return: A list of gw names
    :raises CephiSCSIError: if not run on a gw defined in the config
    """
    local_gw = this_host()
    logger.debug("this host is {}".format(local_gw))
    gateways = [key for key in config if isinstance(config[key], dict)]
    logger.debug("all gateways - {}".format(gateways))
    if local_gw_required and local_gw not in gateways:
        raise CephiSCSIError("{} cannot be used to perform this operation "
                             "because it is not defined within the gateways "
                             "configuration".format(local_gw))
    if local_gw in gateways:
        gateways.remove(local_gw)
    logger.debug("remote gateways: {}".format(gateways))
    return gateways
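# Illustrative sketch only: a typical caller hands get_remote_gateways()
# the 'gateways' section of the config object and fans work out to each
# remote node (issue_gw_request is a hypothetical helper):
#
#   try:
#       remote_gws = get_remote_gateways(config.config['gateways'], logger)
#   except CephiSCSIError as err:
#       logger.error("Aborting: {}".format(err))
#   else:
#       for gw in remote_gws:
#           issue_gw_request(gw)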
def allocate(self, keep_dev_in_lio=True):
    """
    Create image and add to LIO and config.

    :param keep_dev_in_lio: (bool) False if the LIO storage object should
                            be removed after allocating the wwn.
    :return: LIO storage object if successful and keep_dev_in_lio=True,
             else None.
    """
    self.logger.debug("LUN.allocate starting, listing rbd devices")
    disk_list = RBDDev.rbd_list(pool=self.pool)
    self.logger.debug("rados pool '{}' contains the following - "
                      "{}".format(self.pool, disk_list))

    local_gw = this_host()
    self.logger.debug("Hostname Check - this host is {}, target host for "
                      "allocations is {}".format(local_gw,
                                                 self.allocating_host))

    rbd_image = RBDDev(self.image, self.size_bytes, self.backstore,
                       self.pool)
    self.pool_id = rbd_image.pool_id

    # if the image required isn't defined, create it!
    if self.image not in disk_list:
        # create the requested disk if this is the 'owning' host
        if local_gw == self.allocating_host:

            rbd_image.create()

            if not rbd_image.error:
                self.config.add_item('disks', self.config_key)
                self.logger.info("(LUN.allocate) created {}/{} "
                                 "successfully".format(self.pool,
                                                       self.image))
                self.num_changes += 1
            else:
                self.error = True
                self.error_msg = rbd_image.error_msg
                return None
        else:
            # the image isn't there, and this isn't the 'owning' host
            # so wait until the disk arrives
            waiting = 0
            while self.image not in disk_list:
                sleep(settings.config.loop_delay)
                disk_list = RBDDev.rbd_list(pool=self.pool)
                waiting += settings.config.loop_delay
                if waiting >= settings.config.time_out:
                    self.error = True
                    self.error_msg = ("(LUN.allocate) timed out waiting "
                                      "for rbd to show up")
                    return None
    else:
        # requested image is already defined to ceph
        if rbd_image.valid:
            # rbd image is OK to use, so ensure it's in the config
            # object
            if self.config_key not in self.config.config['disks']:
                self.config.add_item('disks', self.config_key)
        else:
            # rbd image is not valid for export, so abort
            self.error = True
            features = ','.join(
                RBDDev.unsupported_features_list[self.backstore])
            self.error_msg = ("(LUN.allocate) rbd '{}' is not compatible "
                              "with LIO\nImage features {} are not"
                              " supported".format(self.image, features))
            self.logger.error(self.error_msg)
            return None

    self.logger.debug("Check the rbd image size matches the request")

    # if num_changes is still 0, the disk pre-exists so on the owning
    # host see if it needs to be resized
    if self.num_changes == 0 and local_gw == self.allocating_host:

        # check the size, and update if needed
        rbd_image.rbd_size()
        if rbd_image.error:
            self.logger.critical(rbd_image.error_msg)
            self.error = True
            self.error_msg = rbd_image.error_msg
            return None

        if rbd_image.changed:
            self.logger.info("rbd image {} resized "
                             "to {}".format(self.config_key,
                                            self.size_bytes))
            self.num_changes += 1
        else:
            self.logger.debug("rbd image {} size matches the configuration"
                              " file request".format(self.config_key))

    self.logger.debug("Begin processing LIO mapping")

    # now see if we need to add this rbd image to LIO
    so = self.lio_stg_object()
    if not so:
        # this image has not been defined to this host's LIO, so check
        # the config for the details and if it's missing define the
        # wwn/alua_state and update the config
        if local_gw == self.allocating_host:
            # first check to see if the device needs adding
            try:
                wwn = self.config.config['disks'][self.config_key]['wwn']
            except KeyError:
                wwn = ''

            if wwn == '':
                # disk hasn't been defined to LIO yet, it's not been
                # defined to the config yet and this is the allocating
                # host
                so = self.add_dev_to_lio()
                if self.error:
                    return None

                # lun is now in LIO, time for some housekeeping :P
                wwn = so._get_wwn()

                if not keep_dev_in_lio:
                    self.remove_dev_from_lio()
                    if self.error:
                        return None

                disk_attr = {"wwn": wwn,
                             "image": self.image,
                             "pool": self.pool,
                             "allocating_host": self.allocating_host,
                             "pool_id": rbd_image.pool_id,
                             "controls": self.controls,
                             "backstore": self.backstore,
                             "backstore_object_name":
                                 self.backstore_object_name}

                self.config.update_item('disks',
                                        self.config_key,
                                        disk_attr)

                self.logger.debug("(LUN.allocate) registered '{}' with "
                                  "wwn '{}' with the config "
                                  "object".format(self.image, wwn))
                self.logger.info("(LUN.allocate) added '{}/{}' to LIO and"
                                 " config object".format(self.pool,
                                                         self.image))
            else:
                # config object already had wwn for this rbd image
                so = self.add_dev_to_lio(wwn)
                if self.error:
                    return None

                self.update_controls()
                self.logger.debug("(LUN.allocate) registered '{}' to LIO "
                                  "with wwn '{}' from the config "
                                  "object".format(self.image, wwn))

            self.num_changes += 1
        else:
            # lun is not already in LIO, but this is not the owning node
            # that defines the wwn. We need the wwn from the config
            # (placed by the allocating host), so we wait!
            waiting = 0
            while waiting < settings.config.time_out:
                self.config.refresh()
                if self.config_key in self.config.config['disks']:
                    if 'wwn' in self.config.config['disks'][self.config_key]:
                        if self.config.config['disks'][self.config_key]['wwn']:
                            wwn = self.config.config['disks'][
                                self.config_key]['wwn']
                            break
                sleep(settings.config.loop_delay)
                waiting += settings.config.loop_delay
                self.logger.debug("(LUN.allocate) waiting for config object"
                                  " to show {} with its "
                                  "wwn".format(self.image))

            if waiting >= settings.config.time_out:
                self.error = True
                self.error_msg = ("(LUN.allocate) waited too long for the "
                                  "wwn information on image {} to "
                                  "arrive".format(self.image))
                return None

            # At this point we have a wwn from the config for this rbd
            # image, so just add to LIO
            so = self.add_dev_to_lio(wwn)
            if self.error:
                return None

            self.logger.info("(LUN.allocate) added {} to LIO using wwn "
                             "'{}' defined by {}".format(
                                 self.image, wwn, self.allocating_host))

            self.num_changes += 1
    else:
        # lun exists in LIO, check the size is correct
        if not self.lio_size_ok(rbd_image, so):
            self.error = True
            self.error_msg = "Unable to sync the rbd device size with LIO"
            self.logger.critical(self.error_msg)
            return None

    self.logger.debug("config meta data for this disk is "
                      "{}".format(
                          self.config.config['disks'][self.config_key]))

    # the owning host for an image is the only host that commits to the
    # config
    if local_gw == self.allocating_host and self.config.changed:

        self.logger.debug("(LUN.allocate) Committing change(s) to the "
                          "config object in pool {}".format(self.pool))
        self.config.commit()
        self.error = self.config.error
        self.error_msg = self.config.error_msg
        if self.error:
            return None

    return so
def manage(self, rqst_type, committer=None):
    """
    Manage the allocation or removal of this client
    :param rqst_type: either 'present' (try and create the nodeACL), or
           'absent' - delete the nodeACL
    :param committer: the host responsible for any commits to the
           configuration - this is not needed for Ansible management,
           but is used by the CLI->API->GWClient interaction
    """
    # Build a local object representing the rados configuration object
    config_object = Config(self.logger)
    if config_object.error:
        self.error = True
        self.error_msg = config_object.error_msg
        return

    # use current config to hold a copy of the current rados config
    # object (dict)
    self.current_config = config_object.config
    target_config = self.current_config['targets'][self.target_iqn]
    update_host = committer

    self.logger.debug("(GWClient.manage) update host to handle any config "
                      "update is {}".format(update_host))

    if rqst_type == "present":

        ###################################################################
        # Ensure the client exists in LIO                                 #
        ###################################################################

        # first look at the request to see if it matches the settings
        # already in the config object - if so this is just a rerun, or a
        # reboot so config object updates are not needed when we change
        # the LIO environment
        if self.iqn in target_config['clients'].keys():
            self.metadata = target_config['clients'][self.iqn]
            config_image_list = sorted(self.metadata['luns'].keys())

            # Does the request match the current config?
            auth_config = self.metadata['auth']
            config_chap = CHAP(auth_config['username'],
                               auth_config['password'],
                               auth_config['password_encryption_enabled'])
            if config_chap.error:
                self.error = True
                self.error_msg = config_chap.error_msg
                return
            # extract the chap_mutual_str from the config object entry
            config_chap_mutual = CHAP(
                auth_config['mutual_username'],
                auth_config['mutual_password'],
                auth_config['mutual_password_encryption_enabled'])
            if config_chap_mutual.error:
                self.error = True
                self.error_msg = config_chap_mutual.error_msg
                return

            if self.username == config_chap.user and \
                    self.password == config_chap.password and \
                    self.mutual_username == config_chap_mutual.user and \
                    self.mutual_password == config_chap_mutual.password and \
                    config_image_list == sorted(self.requested_images):
                self.commit_enabled = False
        else:
            # requested iqn is not in the config object
            self.seed_config(config_object)
            self.metadata = GWClient.seed_metadata

        self.logger.debug("(manage) config updates to be applied from "
                          "this host: {}".format(self.commit_enabled))

        client_exists = self.exists()
        self.define_client()
        if self.error:
            # unable to define the client!
            return

        if client_exists and self.metadata["group_name"]:
            # bypass setup_luns for existing clients that have an
            # associated host group
            pass
        else:
            # either the client didn't exist (new or boot time), or the
            # group_name is not defined, so run setup_luns for this
            # client
            disks_config = self.current_config['disks']
            bad_images = self.validate_images(disks_config)
            if not bad_images:
                self.setup_luns(disks_config)
                if self.error:
                    return
            else:
                # request for images to map to this client that haven't
                # been added to LIO yet!
                self.error = True
                self.error_msg = ("Non-existent images {} requested "
                                  "for {}".format(bad_images, self.iqn))
                return

        if not self.username and not self.password and \
                not self.mutual_username and not self.mutual_password:
            self.logger.warning("(main) client '{}' configured without"
                                " security".format(self.iqn))

        self.configure_auth(self.username, self.password,
                            self.mutual_username, self.mutual_password,
                            target_config)
        if self.error:
            return

    # check the client object's change count, and update the config
        # object if this is the updating host
        if self.change_count > 0:

            if self.commit_enabled:

                if update_host == this_host():
                    # update the config object with this client's
                    # settings
                    self.logger.debug("Updating config object metadata "
                                      "for '{}'".format(self.iqn))
                    target_config['clients'][self.iqn] = self.metadata
                    config_object.update_item("targets",
                                              self.target_iqn,
                                              target_config)

                    # persist the config update
                    config_object.commit()

    elif rqst_type == 'reconfigure':
        self.define_client()

    else:
        ###################################################################
        # Remove the requested client from the config object and LIO     #
        ###################################################################
        if self.exists():
            self.define_client()   # grab the client and parent tpg objects
            self.delete()          # deletes from the local LIO instance
            if self.error:
                return
            else:
                # remove this client from the config
                if update_host == this_host():
                    self.logger.debug("Removing {} from the config "
                                      "object".format(self.iqn))
                    target_config['clients'].pop(self.iqn)
                    config_object.update_item("targets",
                                              self.target_iqn,
                                              target_config)
                    config_object.commit()
        else:
            # desired state is absent, but the client does not exist
            # in LIO - Nothing to do!
            self.logger.info("(main) client {} removal request, but it's"
                             " not in LIO...skipping".format(self.iqn))
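# Illustrative sketch only: how the API layer drives GWClient.manage().
# The constructor arguments follow the call sites elsewhere in this code
# base; the iqn and image list are hypothetical:
#
#   client = GWClient(logger, 'iqn.1994-05.com.redhat:myhost4',
#                     image_list, chap_str, chap_mutual_str, target_iqn)
#   client.manage('present', committer=this_host())
#   if client.error:
#       logger.error(client.error_msg)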
def create_disk(self, pool=None, image=None, size=None, count=1,
                parent=None, create_image=True, backstore=None):
    rc = 0

    if not parent:
        parent = self

    local_gw = this_host()

    disk_key = "{}/{}".format(pool, image)

    if not self._valid_pool(pool):
        return

    self.logger.debug("Creating/mapping disk {}/{}".format(pool, image))

    # make call to local api server's disk endpoint
    disk_api = '{}://localhost:{}/api/disk/{}'.format(
        self.http_mode, settings.config.api_port, disk_key)

    api_vars = {'pool': pool,
                'owner': local_gw,
                'count': count,
                'mode': 'create',
                'create_image': 'true' if create_image else 'false',
                'backstore': backstore}
    if size:
        api_vars['size'] = size.upper()

    self.logger.debug("Issuing disk create request")

    api = APIRequest(disk_api, data=api_vars)
    api.put()

    if api.response.status_code == 200:
        # rbd create and map successful across all gateways so request
        # its details and add to the UI
        self.logger.debug("- LUN(s) ready on all gateways")
        self.logger.info("ok")

        self.logger.debug("Updating UI for the new disk(s)")
        for n in range(1, (int(count) + 1), 1):

            if int(count) > 1:
                disk_key = "{}/{}{}".format(pool, image, n)
            else:
                disk_key = "{}/{}".format(pool, image)

            disk_api = ('{}://localhost:{}/api/disk/'
                        '{}'.format(self.http_mode,
                                    settings.config.api_port,
                                    disk_key))
            api = APIRequest(disk_api)
            api.get()

            if api.response.status_code == 200:
                try:
                    image_config = api.response.json()
                except Exception:
                    raise GatewayAPIError("Malformed REST API response")

                disk_pool = None
                for current_disk_pool in self.children:
                    if current_disk_pool.name == pool:
                        disk_pool = current_disk_pool
                        break

                if disk_pool:
                    Disk(disk_pool, disk_key, image_config)
                else:
                    DiskPool(parent, pool, [image_config])

                self.logger.debug("{} added to the UI".format(disk_key))
            else:
                raise GatewayAPIError("Unable to retrieve disk details "
                                      "for '{}' from the "
                                      "API".format(disk_key))

        ceph_pools = self.parent.ceph.cluster.pools
        ceph_pools.refresh()
    else:
        self.logger.error("Failed : {}".format(response_message(
            api.response, self.logger)))
        rc = 8

    return rc
def ui_command_create(self, gateway_name, ip_addresses, nosync=False,
                      skipchecks='false'):
    """
    Define a gateway to the gateway group for this iscsi target. The
    first host added should be the gateway running the command.

    gateway_name ... should resolve to the hostname of the gateway
    ip_addresses ... are the IPv4/IPv6 addresses of the interfaces the
                     iSCSI portals should use
    nosync ......... by default new gateways are sync'd with the
                     existing configuration by cli. By specifying nosync
                     the sync step is bypassed - so the new gateway will
                     need to have its rbd-target-api daemon restarted to
                     apply the current configuration
                     (default = False)
    skipchecks ..... set this to true to force gateway validity checks
                     to be bypassed (default = false). This is a
                     developer option ONLY. Skipping these checks has
                     the potential to result in an unstable
                     configuration.
    """
    ip_addresses = [normalize_ip_address(ip_address)
                    for ip_address in ip_addresses.split(',')]
    self.logger.debug("CMD: ../gateways/ create {} {} "
                      "nosync={} skipchecks={}".format(gateway_name,
                                                       ip_addresses,
                                                       nosync,
                                                       skipchecks))

    local_gw = this_host()
    current_gateways = [tgt.name for tgt in self.children]

    if gateway_name != local_gw and len(current_gateways) == 0:
        # the first gateway defined must be the local machine. By doing
        # this the initial create uses localhost, and places its portal
        # IP in the gateway ip list. Once the gateway ip list is
        # defined, the api server can resolve against the gateways -
        # until the list is defined only a request from localhost is
        # acceptable to the api
        self.logger.error("The first gateway defined must be the local "
                          "machine")
        return

    if skipchecks not in ['true', 'false']:
        self.logger.error("skipchecks must be either true or false")
        return

    if local_gw in current_gateways:
        current_gateways.remove(local_gw)

    config = self.parent.parent.parent._get_config()
    if not config:
        self.logger.error("Unable to refresh local config"
                          " over API - sync aborted, restart rbd-target-api"
                          " on {} to sync".format(gateway_name))
        # without a config we cannot proceed any further
        return

    target_iqn = self.parent.name
    target_config = config['targets'][target_iqn]
    if nosync:
        sync_text = "sync skipped"
    else:
        sync_text = ("sync'ing {} disk(s) and "
                     "{} client(s)".format(len(target_config['disks']),
                                           len(target_config['clients'])))

    if skipchecks == 'true':
        self.logger.warning("OS version/package checks have been bypassed")

    self.logger.info("Adding gateway, {}".format(sync_text))

    gw_api = '{}://{}:{}/api'.format(self.http_mode,
                                     "localhost",
                                     settings.config.api_port)
    gw_rqst = gw_api + '/gateway/{}/{}'.format(target_iqn, gateway_name)
    gw_vars = {"nosync": nosync,
               "skipchecks": skipchecks,
               "ip_address": ','.join(ip_addresses)}

    api = APIRequest(gw_rqst, data=gw_vars)
    api.put()

    msg = response_message(api.response, self.logger)
    if api.response.status_code != 200:
        self.logger.error("Failed : {}".format(msg))
        return

    self.logger.debug("{}".format(msg))
    self.logger.debug("Adding gw to UI")

    # Target created OK, get the details back from the gateway and
    # add to the UI. We have to use the new gateway to ensure what
    # we get back is current (the other gateways will lag until they
    # see the epoch xattr change on the config object)
    new_gw_endpoint = ('{}://{}:{}/'
                       'api'.format(self.http_mode,
                                    gateway_name,
                                    settings.config.api_port))
    api = APIRequest('{}/sysinfo/hostname'.format(new_gw_endpoint))
    api.get()
    gateway_hostname = api.response.json()['data']

    config = self.parent.parent.parent._get_config(
        endpoint=new_gw_endpoint)
    target_config = config['targets'][target_iqn]
    portal_config = target_config['portals'][gateway_hostname]
    Gateway(self, gateway_hostname, portal_config)

    self.logger.info('ok')
def define_luns(logger, config, gateway):
    """
    define the disks in the config to LIO
    :param logger: logger object to print to
    :param config: configuration dict from the rados pool
    :param gateway: (object) gateway object - used for mapping
    :raises CephiSCSIError.
    """
    local_gw = this_host()

    # sort the disks dict keys, so the disks are registered in a specific
    # sequence
    disks = config.config['disks']
    srtd_disks = sorted(disks)
    pools = {disks[disk_key]['pool'] for disk_key in srtd_disks}

    if not pools:
        # a set comprehension never yields None, so test for an empty set
        logger.info("No LUNs to export")
        return True

    ips = ip_addresses()

    with rados.Rados(conffile=settings.config.cephconf) as cluster:

        for pool in pools:

            logger.debug("Processing rbd's in '{}' pool".format(pool))

            with cluster.open_ioctx(pool) as ioctx:

                pool_disks = [disk_key for disk_key in srtd_disks
                              if disk_key.startswith(pool + '.')]
                for disk_key in pool_disks:

                    is_lun_mapped = False
                    for _, target_config in config.config['targets'].items():
                        if local_gw in target_config['portals'] \
                                and disk_key in target_config['disks']:
                            is_lun_mapped = True
                            break
                    if is_lun_mapped:
                        pool, image_name = disk_key.split('.')
                        try:
                            with rbd.Image(ioctx, image_name) as rbd_image:
                                RBDDev.rbd_lock_cleanup(logger, ips,
                                                        rbd_image)

                                backstore = config.config['disks'][
                                    disk_key]['backstore']
                                lun = LUN(logger, pool, image_name,
                                          rbd_image.size(), local_gw,
                                          backstore)
                                if lun.error:
                                    raise CephiSCSIError(
                                        "Error defining rbd "
                                        "image {}".format(disk_key))

                                lun.allocate()

                                if lun.error:
                                    raise CephiSCSIError(
                                        "Error unable to "
                                        "register {} with "
                                        "LIO - {}".format(disk_key,
                                                          lun.error_msg))

                        except rbd.ImageNotFound:
                            raise CephiSCSIError(
                                "Disk '{}' defined to the "
                                "config, but image '{}' can "
                                "not be found in '{}' "
                                "pool".format(disk_key, image_name,
                                              pool))

    if gateway:
        # Gateway Mapping : Map the LUNs registered to all tpgs within
        # the LIO target
        gateway.manage('map')
        if gateway.error:
            raise CephiSCSIError("Error mapping the LUNs to the tpgs "
                                 "within the iscsi Target")
def activate(self):
    disk = self.config.config['disks'].get(self.config_key, None)
    if not disk:
        raise CephiSCSIError("Image {} not found.".format(self.image))

    wwn = disk.get('wwn', None)
    if not wwn:
        raise CephiSCSIError("LUN {} missing wwn".format(self.image))

    # re-add backend storage object
    so = self.lio_stg_object()
    if not so:
        self.add_dev_to_lio(wwn)
        if self.error:
            raise CephiSCSIError("LUN activate failure - {}".format(
                self.error_msg))

    # re-add LUN to target
    local_gw = this_host()
    targets_items = [item for item in self.config.config['targets'].items()
                     if self.config_key in item[1]['disks']
                     and local_gw in item[1]['portals']]
    for target_iqn, target in targets_items:
        ip_list = target['ip_list']

        # Add the mapping for the lun to ensure the block device is
        # present on all TPG's
        gateway = GWTarget(self.logger, target_iqn, ip_list)
        gateway.manage('map')
        if gateway.error:
            raise CephiSCSIError("LUN mapping failed - {}".format(
                gateway.error_msg))

        # re-map LUN to hosts
        client_err = ''
        for client_iqn in target['clients']:
            client_metadata = target['clients'][client_iqn]
            if client_metadata.get('group_name', ''):
                continue

            image_list = list(client_metadata['luns'].keys())
            if self.config_key not in image_list:
                continue

            client_chap = CHAP(client_metadata['auth']['chap'])
            chap_str = client_chap.chap_str
            if client_chap.error:
                raise CephiSCSIError("Password decode issue : "
                                     "{}".format(client_chap.error_msg))

            client_chap_mutual = CHAP(
                client_metadata['auth']['chap_mutual'])
            chap_mutual_str = client_chap_mutual.chap_str
            if client_chap_mutual.error:
                raise CephiSCSIError("Password decode issue : "
                                     "{}".format(
                                         client_chap_mutual.error_msg))

            client = GWClient(self.logger, client_iqn, image_list,
                              chap_str, chap_mutual_str, target_iqn)
            client.manage('present')
            if client.error:
                client_err = "LUN mapping failed {} - {}".format(
                    client_iqn, client.error_msg)

        # re-map LUN to host groups
        for group_name in target['groups']:
            host_group = target['groups'][group_name]
            members = host_group.get('members')
            disks = host_group.get('disks').keys()
            if self.config_key not in disks:
                continue

            group = Group(self.logger, target_iqn, group_name,
                          members, disks)
            group.apply()
            if group.error:
                client_err = "LUN mapping failed {} - {}".format(
                    group_name, group.error_msg)

        if client_err:
            raise CephiSCSIError(client_err)
def define_luns(logger, config, target):
    """
    define the disks in the config to LIO and map to a LUN
    :param logger: logger object to print to
    :param config: configuration dict from the rados pool
    :param target: (object) gateway object - used for mapping
    :raises CephiSCSIError.
    """
    ips = ip_addresses()
    local_gw = this_host()

    target_disks = config.config["targets"][target.iqn]['disks']
    if not target_disks:
        logger.info("No LUNs to export")
        return

    disks = {}
    for disk in target_disks:
        disks[disk] = config.config['disks'][disk]

    # sort the disks dict keys, so the disks are registered in a specific
    # sequence
    srtd_disks = sorted(disks)
    pools = {disks[disk_key]['pool'] for disk_key in srtd_disks}

    with rados.Rados(conffile=settings.config.cephconf,
                     name=settings.config.cluster_client_name) as cluster:

        for pool in pools:

            logger.debug("Processing rbd's in '{}' pool".format(pool))

            with cluster.open_ioctx(pool) as ioctx:

                pool_disks = [disk_key for disk_key in srtd_disks
                              if disk_key.startswith(pool + '/')]
                for disk_key in pool_disks:

                    pool, image_name = disk_key.split('/')

                    with rbd.Image(ioctx, image_name) as rbd_image:

                        disk_config = config.config['disks'][disk_key]
                        backstore = disk_config['backstore']
                        backstore_object_name = disk_config[
                            'backstore_object_name']

                        lun = LUN(logger, pool, image_name,
                                  rbd_image.size(), local_gw,
                                  backstore, backstore_object_name)

                        if lun.error:
                            raise CephiSCSIError("Error defining rbd "
                                                 "image {}".format(
                                                     disk_key))

                        so = lun.allocate()
                        if lun.error:
                            raise CephiSCSIError("Unable to register {} "
                                                 "with LIO: {}".format(
                                                     disk_key,
                                                     lun.error_msg))

                        # If not in use by another target on this gw
                        # clean up stale locks.
                        if so.status != 'activated':
                            RBDDev.rbd_lock_cleanup(logger, ips,
                                                    rbd_image)

                        target._map_lun(config, so)
                        if target.error:
                            raise CephiSCSIError("Mapping for {} failed: "
                                                 "{}".format(
                                                     disk_key,
                                                     target.error_msg))
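# Illustrative sketch only: the disk keys walked above use the
# 'pool/image' form, so a minimal config layout this function can consume
# would look roughly like this (field names follow the lookups above, all
# values are hypothetical):
#
#   {
#       "disks": {
#           "rbd/disk_1": {"pool": "rbd",
#                          "backstore": "user:rbd",
#                          "backstore_object_name": "rbd.disk_1"}
#       },
#       "targets": {
#           "iqn.2003-01.com.redhat.iscsi-gw:ceph-igw": {
#               "disks": {"rbd/disk_1": {"lun_id": 0}}
#           }
#       }
#   }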
def all_client(client_iqn):
    """
    Handle the client create/delete actions across gateways
    :param client_iqn: (str) IQN of the client to create or delete
    **RESTRICTED**
    Examples:
    curl --insecure --user admin:admin -X PUT https://192.168.122.69:5001/api/all_client/iqn.1994-05.com.redhat:myhost4
    curl --insecure --user admin:admin -X DELETE https://192.168.122.69:5001/api/all_client/iqn.1994-05.com.redhat:myhost4
    """
    method = {"PUT": 'create',
              "DELETE": 'delete'}

    http_mode = 'https' if settings.config.api_secure else 'http'
    local_gw = this_host()
    logger.debug("this host is {}".format(local_gw))
    gateways = [key for key in config.config['gateways']
                if isinstance(config.config['gateways'][key], dict)]
    logger.debug("other gateways - {}".format(gateways))
    gateways.remove(local_gw)

    # committing host is the node responsible for updating the config
    # object
    api_vars = {"committing_host": local_gw}

    # validate the PUT/DELETE request first
    client_usable = valid_client(mode=method[request.method],
                                 client_iqn=client_iqn)
    if client_usable != 'ok':
        return jsonify(message=client_usable), 400

    if request.method == 'PUT':

        client_api = '{}://127.0.0.1:{}/api/client/{}'.format(
            http_mode, settings.config.api_port, client_iqn)

        logger.debug("Processing client CREATE for {}".format(client_iqn))
        api = APIRequest(client_api, data=api_vars)
        api.put()
        if api.response.status_code == 200:
            logger.info("Client {} added to local LIO".format(client_iqn))

            for gw in gateways:
                client_api = '{}://{}:{}/api/client/{}'.format(
                    http_mode, gw, settings.config.api_port, client_iqn)

                logger.debug("sending request to {} to create {}".format(
                    gw, client_iqn))
                api = APIRequest(client_api, data=api_vars)
                api.put()

                if api.response.status_code == 200:
                    logger.info("Client '{}' added to {}".format(
                        client_iqn, gw))
                    continue
                else:
                    # client create failed against the remote LIO
                    # instance
                    msg = api.response.json()['message']
                    logger.error("Client create for {} failed on {} "
                                 ": {}".format(client_iqn, gw, msg))

                    return jsonify(message=msg), 500

            # all gateways processed, return a success state to the
            # caller
            return jsonify(message='ok'), 200

        else:
            # client create failed against the local LIO instance
            msg = api.response.json()['message']
            logger.error("Client create on local LIO instance failed "
                         "for {} : {}".format(client_iqn, msg))
            return jsonify(message=msg), 500

    else:
        # DELETE client request
        # Process flow: remote gateways > local > delete config object
        # entry
        for gw in gateways:
            client_api = '{}://{}:{}/api/client/{}'.format(
                http_mode, gw, settings.config.api_port, client_iqn)
            logger.info("- removing '{}' from {}".format(client_iqn, gw))
            api = APIRequest(client_api, data=api_vars)
            api.delete()

            if api.response.status_code == 200:
                logger.info("- '{}' removed".format(client_iqn))
                continue
            elif api.response.status_code == 400:
                logger.error("- '{}' is in use on {}".format(client_iqn,
                                                             gw))
                return jsonify(message="Client in use"), 400
            else:
                msg = api.response.json()['message']
                logger.error("Failed to remove {} from {}".format(
                    client_iqn, gw))
                return jsonify(message="failed to remove client '{}' : "
                                       "{}".format(client_iqn, msg)), 500

        # At this point the other gateways have removed the client, so
        # remove from the local LIO instance
        client_api = '{}://127.0.0.1:{}/api/client/{}'.format(
            http_mode, settings.config.api_port, client_iqn)
        api = APIRequest(client_api, data=api_vars)
        api.delete()

        if api.response.status_code == 200:
            logger.info("successfully removed '{}'".format(client_iqn))
            return jsonify(message="ok"), 200
        else:
            return jsonify(message="Unable to delete {} from local LIO "
                                   "instance".format(client_iqn)), \
                api.response.status_code
def all_client_auth(client_iqn):
    """
    Coordinate client authentication changes across each gateway node
    The following parameters are needed to manage client auth
    :param client_iqn: (str) client IQN name
    :param chap: (str) chap string of the form user/password or ''
    **RESTRICTED**
    """
    http_mode = 'https' if settings.config.api_secure else 'http'
    local_gw = this_host()
    logger.debug("this host is {}".format(local_gw))
    gateways = [key for key in config.config['gateways']
                if isinstance(config.config['gateways'][key], dict)]
    logger.debug("other gateways - {}".format(gateways))
    gateways.remove(local_gw)

    lun_list = config.config['clients'][client_iqn]['luns'].keys()
    image_list = ','.join(lun_list)
    chap = request.form.get('chap')

    client_usable = valid_client(mode='auth',
                                 client_iqn=client_iqn,
                                 chap=chap)
    if client_usable != 'ok':
        logger.error("BAD auth request from {}".format(
            request.remote_addr))
        return jsonify(message=client_usable), 400

    api_vars = {"committing_host": local_gw,
                "image_list": image_list,
                "chap": chap}

    clientauth_api = '{}://127.0.0.1:{}/api/clientauth/{}'.format(
        http_mode, settings.config.api_port, client_iqn)

    logger.debug("Issuing client update to local gw for {}".format(
        client_iqn))
    api = APIRequest(clientauth_api, data=api_vars)
    api.put()

    if api.response.status_code == 200:
        logger.debug("Client update succeeded on local LIO")

        for gw in gateways:
            clientauth_api = '{}://{}:{}/api/clientauth/{}'.format(
                http_mode, gw, settings.config.api_port, client_iqn)
            logger.debug("updating client {} on {}".format(client_iqn,
                                                           gw))
            api = APIRequest(clientauth_api, data=api_vars)
            api.put()

            if api.response.status_code == 200:
                logger.info("client update successful on {}".format(gw))
                continue
            else:
                return jsonify(message="client update failed on "
                                       "{}".format(gw)), \
                    api.response.status_code

        logger.info("All gateways updated")
        return jsonify(message="ok"), 200

    else:
        # the local update failed, so abort further updates
        return jsonify(message="Client update failed on local "
                               "LIO instance"), api.response.status_code
def all_client_luns(client_iqn):
    """
    Coordinate the addition(PUT) and removal(DELETE) of a disk from a client
    :param client_iqn: (str) IQN of the client
    :param disk: (str) rbd image name of the format pool.image
    **RESTRICTED**
    """
    http_mode = 'https' if settings.config.api_secure else 'http'

    local_gw = this_host()
    logger.debug("this host is {}".format(local_gw))
    gateways = [key for key in config.config['gateways']
                if isinstance(config.config['gateways'][key], dict)]
    logger.debug("other gateways - {}".format(gateways))
    gateways.remove(local_gw)

    disk = request.form.get('disk')

    # take a mutable copy of the client's current lun list - a plain
    # dict.keys() view does not support append/remove
    lun_list = list(config.config['clients'][client_iqn]['luns'].keys())

    if request.method == 'PUT':
        lun_list.append(disk)
    else:
        # this is a delete request
        if disk in lun_list:
            lun_list.remove(disk)
        else:
            return jsonify(message="disk not mapped to client"), 400

    chap_obj = CHAP(config.config['clients'][client_iqn]['auth']['chap'])
    chap = "{}/{}".format(chap_obj.user, chap_obj.password)
    image_list = ','.join(lun_list)

    client_usable = valid_client(mode='disk', client_iqn=client_iqn,
                                 image_list=image_list)
    if client_usable != 'ok':
        logger.error("Bad disk request for client {} : "
                     "{}".format(client_iqn, client_usable))
        return jsonify(message=client_usable), 400

    # committing host is the local LIO node
    api_vars = {"committing_host": local_gw,
                "image_list": image_list,
                "chap": chap}

    clientlun_api = '{}://127.0.0.1:{}/api/clientlun/{}'.format(
        http_mode, settings.config.api_port, client_iqn)
    api = APIRequest(clientlun_api, data=api_vars)
    api.put()

    if api.response.status_code == 200:

        logger.info("disk mapping update for {} successful".format(
            client_iqn))

        for gw in gateways:
            clientlun_api = '{}://{}:{}/api/clientlun/{}'.format(
                http_mode, gw, settings.config.api_port, client_iqn)
            logger.debug("Updating disk map for {} on GW {}".format(
                client_iqn, gw))
            api = APIRequest(clientlun_api, data=api_vars)
            api.put()

            if api.response.status_code == 200:
                logger.debug("gateway '{}' updated".format(gw))
                continue
            else:
                logger.error("disk mapping update on {} failed".format(
                    gw))
                return jsonify(message="disk map update failed on "
                                       "{}".format(gw)), \
                    api.response.status_code

        return jsonify(message="ok"), 200

    else:
        # disk map update failed at the first hurdle!
        logger.error("disk map update failed on the local LIO instance")
        return jsonify(message="failed to update local LIO instance"), \
            api.response.status_code
def manage(self, mode):
    """
    Manage the definition of the gateway, given a mode of 'target',
    'map', 'init' or 'clearconfig'. In 'target' mode the LIO TPG is
    defined, whereas in map mode, the required LUNs are added to the
    existing TPG
    :param mode: run mode - target, map, init or clearconfig (str)
    :return: None - but sets the object's error flags to be checked by
             the caller
    """
    config = Config(self.logger)
    if config.error:
        self.error = True
        self.error_msg = config.error_msg
        return

    local_gw = this_host()

    if mode == 'target':

        if self.exists():
            self.load_config()
            self.check_tpgs()
        else:
            self.create_target()

        if self.error:
            # return to caller, with error state set
            return

        target_config = config.config["targets"][self.iqn]
        self.update_acl(target_config['acl_enabled'])

        discovery_auth_config = config.config['discovery_auth']
        Discovery.set_discovery_auth_lio(
            discovery_auth_config['username'],
            discovery_auth_config['password'],
            discovery_auth_config['password_encryption_enabled'],
            discovery_auth_config['mutual_username'],
            discovery_auth_config['mutual_password'],
            discovery_auth_config['mutual_password_encryption_enabled'])

        gateway_group = config.config["gateways"].keys()
        if "ip_list" not in target_config:
            target_config['ip_list'] = self.gateway_ip_list
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if self.controls != target_config.get('controls', {}):
            target_config['controls'] = self.controls.copy()
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if local_gw not in gateway_group:
            gateway_metadata = {"active_luns": 0}
            config.add_item("gateways", local_gw)
            config.update_item("gateways", local_gw, gateway_metadata)
            self.config_updated = True

        if local_gw not in target_config['portals']:
            # Update existing gws with the new gw
            for remote_gw, remote_gw_config in \
                    target_config['portals'].items():
                if remote_gw_config['gateway_ip_list'] == \
                        self.gateway_ip_list:
                    continue

                inactive_portal_ip = list(self.gateway_ip_list)
                for portal_ip_address in \
                        remote_gw_config["portal_ip_addresses"]:
                    inactive_portal_ip.remove(portal_ip_address)
                remote_gw_config['gateway_ip_list'] = self.gateway_ip_list
                remote_gw_config['tpgs'] = len(self.tpg_list)
                remote_gw_config['inactive_portal_ips'] = \
                    inactive_portal_ip
                target_config['portals'][remote_gw] = remote_gw_config

            # Add the new gw
            inactive_portal_ip = list(self.gateway_ip_list)
            for active_portal_ip in self.active_portal_ips:
                inactive_portal_ip.remove(active_portal_ip)

            portal_metadata = {
                "tpgs": len(self.tpg_list),
                "gateway_ip_list": self.gateway_ip_list,
                "portal_ip_addresses": self.active_portal_ips,
                "inactive_portal_ips": inactive_portal_ip}
            target_config['portals'][local_gw] = portal_metadata
            target_config['ip_list'] = self.gateway_ip_list

            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if self.config_updated:
            config.commit()

    elif mode == 'map':

        if self.exists():
            self.load_config()
            self.map_luns(config)

            target_config = config.config["targets"][self.iqn]
            self.update_acl(target_config['acl_enabled'])
        else:
            self.error = True
            self.error_msg = ("Attempted to map to a gateway '{}' that "
                              "hasn't been defined yet...out of order "
                              "steps?".format(self.iqn))

    elif mode == 'init':

        # init mode just creates the iscsi target definition and updates
        # the config object. It is used by the CLI only
        if self.exists():
            self.logger.info("GWTarget init request skipped - target "
                             "already exists")
        else:
            # create the target
            self.create_target()
            # if an error happens, we should never store this target to
            # the config
            if self.error:
                return

            seed_target = {
                'disks': {},
                'clients': {},
                'acl_enabled': True,
                'auth': {
                    'username': '',
                    'password': '',
                    'password_encryption_enabled': False,
                    'mutual_username': '',
                    'mutual_password': '',
                    'mutual_password_encryption_enabled': False},
                'portals': {},
                'groups': {},
                'controls': {}
            }
            config.add_item("targets", self.iqn, seed_target)
            config.commit()

            discovery_auth_config = config.config['discovery_auth']
            Discovery.set_discovery_auth_lio(
                discovery_auth_config['username'],
                discovery_auth_config['password'],
                discovery_auth_config['password_encryption_enabled'],
                discovery_auth_config['mutual_username'],
                discovery_auth_config['mutual_password'],
                discovery_auth_config['mutual_password_encryption_enabled'])

    elif mode == 'clearconfig':

        # Called by API from CLI clearconfig command
        if self.exists():
            self.load_config()
            self.clear_config(config)

        if self.error:
            return

        target_config = config.config["targets"][self.iqn]
        if len(target_config['portals']) == 0:
            config.del_item('targets', self.iqn)
        else:
            gw_ips = target_config['portals'][local_gw][
                'portal_ip_addresses']

            target_config['portals'].pop(local_gw)

            ip_list = target_config['ip_list']
            for gw_ip in gw_ips:
                ip_list.remove(gw_ip)
            if len(ip_list) > 0 and len(
                    target_config['portals'].keys()) > 0:
                config.update_item('targets', self.iqn, target_config)
            else:
                # no more portals in the list, so delete the target
                config.del_item('targets', self.iqn)

            remove_gateway = True
            for _, target in config.config["targets"].items():
                if local_gw in target['portals']:
                    remove_gateway = False
                    break

            if remove_gateway:
                # gateway is no longer used, so delete it
                config.del_item('gateways', local_gw)

        config.commit()
def manage(self, mode):
    """
    Manage the definition of the gateway, given a mode of 'target',
    'map', 'init' or 'clearconfig'. In 'target' mode the LIO TPG is
    defined, whereas in map mode, the required LUNs are added to the
    existing TPG
    :param mode: run mode - target, map, init or clearconfig (str)
    :return: None - but sets the object's error flags to be checked by
             the caller
    """
    config = Config(self.logger)
    if config.error:
        self.error = True
        self.error_msg = config.error_msg
        return

    local_gw = this_host()

    if mode == 'target':

        if self.exists():
            self.load_config()
            self.check_tpgs()
        else:
            self.create_target()

        if self.error:
            # return to caller, with error state set
            return

        gateway_group = config.config["gateways"].keys()

        # this action could be carried out by multiple nodes
        # concurrently, but since the value is the same (i.e. all
        # gateway nodes use the same iqn) it's not worth worrying about!
        if "iqn" not in gateway_group:
            self.config_updated = True
            config.add_item("gateways", "iqn", initial_value=self.iqn)
        if "ip_list" not in gateway_group:
            self.config_updated = True
            config.add_item("gateways", "ip_list",
                            initial_value=self.gateway_ip_list)

        if local_gw not in gateway_group:
            inactive_portal_ip = list(self.gateway_ip_list)
            inactive_portal_ip.remove(self.active_portal_ip)

            gateway_metadata = {
                "portal_ip_address": self.active_portal_ip,
                "iqn": self.iqn,
                "active_luns": 0,
                "tpgs": len(self.tpg_list),
                "inactive_portal_ips": inactive_portal_ip,
                "gateway_ip_list": self.gateway_ip_list}

            config.add_item("gateways", local_gw)
            config.update_item("gateways", local_gw, gateway_metadata)
            config.update_item("gateways", "ip_list",
                               self.gateway_ip_list)
            self.config_updated = True
        else:
            # gateway already defined, so check that the IP list it has
            # matches the current request
            gw_details = config.config['gateways'][local_gw]
            # (was cmp(...) != 0 - a direct comparison keeps the same
            # semantics and also works on python3)
            if gw_details['gateway_ip_list'] != self.gateway_ip_list:
                inactive_portal_ip = list(self.gateway_ip_list)
                inactive_portal_ip.remove(self.active_portal_ip)
                gw_details['tpgs'] = len(self.tpg_list)
                gw_details['gateway_ip_list'] = self.gateway_ip_list
                gw_details['inactive_portal_ips'] = inactive_portal_ip
                config.update_item('gateways', local_gw, gw_details)
                self.config_updated = True

        if self.config_updated:
            config.commit()

    elif mode == 'map':

        if self.exists():
            self.load_config()
            self.map_luns(config)
        else:
            self.error = True
            self.error_msg = ("Attempted to map to a gateway '{}' that "
                              "hasn't been defined yet...out of order "
                              "steps?".format(self.iqn))

    elif mode == 'init':

        # init mode just creates the iscsi target definition and updates
        # the config object. It is used by the CLI only
        if self.exists():
            self.logger.info("GWTarget init request skipped - target "
                             "already exists")
        else:
            # create the target
            self.create_target()
            current_iqn = config.config['gateways'].get('iqn', '')

            # First gateway asked to create the target will update the
            # config object
            if not current_iqn:
                config.add_item("gateways", "iqn",
                                initial_value=self.iqn)
                config.commit()

    elif mode == 'clearconfig':

        # Called by API from CLI clearconfig command
        if self.exists():
            self.load_config()
        else:
            self.error = True
            self.error_msg = "IQN provided does not exist"

        self.clear_config()

        if not self.error:
            gw_ip = config.config['gateways'][local_gw][
                'portal_ip_address']

            config.del_item('gateways', local_gw)

            ip_list = config.config['gateways']['ip_list']
            ip_list.remove(gw_ip)
            if len(ip_list) > 0:
                config.update_item('gateways', 'ip_list', ip_list)
            else:
                # no more gateways in the list, so delete remaining
                # items
                config.del_item('gateways', 'ip_list')
                config.del_item('gateways', 'iqn')
                config.del_item('gateways', 'created')

            config.commit()
def all_disk(image_id):
    """
    Coordinate the create/delete of rbd images across the gateway nodes
    The "all_" method calls the corresponding disk api entrypoints across
    each gateway. Processing is done serially: creation is done locally
    first, then on the other gateways - whereas rbd deletion is performed
    first against remote gateways and then the local machine is used to
    perform the actual rbd delete.

    :param image_id: (str) rbd image name of the format pool.image
    **RESTRICTED**
    """
    http_mode = 'https' if settings.config.api_secure else 'http'

    local_gw = this_host()
    logger.debug("this host is {}".format(local_gw))

    gateways = [key for key in config.config['gateways']
                if isinstance(config.config['gateways'][key], dict)]
    logger.debug("all gateways - {}".format(gateways))
    gateways.remove(local_gw)
    logger.debug("other gw's {}".format(gateways))

    if request.method == 'PUT':

        pool = request.form.get('pool')
        size = request.form.get('size')
        mode = request.form.get('mode')

        pool, image_name = image_id.split('.')
        disk_usable = valid_disk(pool=pool, image=image_name, size=size,
                                 mode=mode)
        if disk_usable != 'ok':
            return jsonify(message=disk_usable), 400

        # make call to local api server first!
        disk_api = '{}://127.0.0.1:{}/api/disk/{}'.format(
            http_mode, settings.config.api_port, image_id)

        api_vars = {'pool': pool,
                    'size': size,
                    'owner': local_gw,
                    'mode': mode}

        logger.debug("Issuing disk request to the local API "
                     "for {}".format(image_id))

        api = APIRequest(disk_api, data=api_vars)
        api.put()

        if api.response.status_code == 200:
            logger.info("LUN is ready on this host")

            for gw in gateways:
                logger.debug("Adding {} to gw {}".format(image_id, gw))

                disk_api = '{}://{}:{}/api/disk/{}'.format(
                    http_mode, gw, settings.config.api_port, image_id)

                api = APIRequest(disk_api, data=api_vars)
                api.put()

                if api.response.status_code == 200:
                    logger.info("LUN is ready on {}".format(gw))
                else:
                    return jsonify(
                        message=api.response.json()['message']), 500
        else:
            logger.error(api.response.json()['message'])
            return jsonify(message=api.response.json()['message']), 500

        logger.info("LUN defined to all gateways for {}".format(image_id))

        return jsonify(message="ok"), 200

    else:
        # this is a DELETE request
        pool_name, image_name = image_id.split('.')
        disk_usable = valid_disk(mode='delete', pool=pool_name,
                                 image=image_name)
        if disk_usable != 'ok':
            return jsonify(message=disk_usable), 400

        api_vars = {'purge_host': local_gw}

        # process other gateways first
        for gw_name in gateways:
            disk_api = '{}://{}:{}/api/disk/{}'.format(
                http_mode, gw_name, settings.config.api_port, image_id)

            logger.debug("removing '{}' from {}".format(image_id,
                                                        gw_name))

            api = APIRequest(disk_api, data=api_vars)
            api.delete()

            if api.response.status_code == 200:
                logger.debug("{} removed from {}".format(image_id,
                                                         gw_name))
            elif api.response.status_code == 400:
                # 400 means the rbd is still allocated to a client
                msg = api.response.json()['message']
                logger.error(msg)
                return jsonify(message=msg), 400
            else:
                # delete failed - don't know why, pass the error to the
                # admin and abort
                msg = api.response.json()['message']
                return jsonify(message=msg), 500

        # at this point the remote gateways are cleaned up, now perform
        # the purge on the local host which will also purge the rbd
        disk_api = '{}://127.0.0.1:{}/api/disk/{}'.format(
            http_mode, settings.config.api_port, image_id)

        logger.debug("- removing '{}' from the local "
                     "machine, deleting the rbd".format(image_id))

        api = APIRequest(disk_api, data=api_vars)
        api.delete()

        if api.response.status_code == 200:
            logger.debug("- rbd {} deleted".format(image_id))
            return jsonify(message="ok"), 200
        else:
            return jsonify(message="failed to delete rbd "
                                   "{}".format(image_id)), 500
def manage(self, mode):
    """
    Manage the definition of the gateway, given a mode of 'target',
    'map', 'init' or 'clearconfig'. In 'target' mode the LIO TPG is
    defined, whereas in map mode, the required LUNs are added to the
    existing TPG
    :param mode: run mode - target, map, init or clearconfig (str)
    :return: None - but sets the object's error flags to be checked by
             the caller
    """
    config = Config(self.logger)
    if config.error:
        self.error = True
        self.error_msg = config.error_msg
        return

    local_gw = this_host()

    if mode == 'target':

        if self.exists():
            self.load_config()
            self.check_tpgs()
        else:
            self.create_target()

        if self.error:
            # return to caller, with error state set
            return

        Discovery.set_discovery_auth_lio(
            config.config['discovery_auth']['chap'],
            config.config['discovery_auth']['chap_mutual'])

        target_config = config.config["targets"][self.iqn]
        gateway_group = config.config["gateways"].keys()
        if "ip_list" not in target_config:
            target_config['ip_list'] = self.gateway_ip_list
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if self.controls != target_config.get('controls', {}):
            target_config['controls'] = self.controls.copy()
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if local_gw not in gateway_group:
            gateway_metadata = {"active_luns": 0}
            config.add_item("gateways", local_gw)
            config.update_item("gateways", local_gw, gateway_metadata)
            self.config_updated = True

        if local_gw not in target_config['portals']:
            inactive_portal_ip = list(self.gateway_ip_list)
            inactive_portal_ip.remove(self.active_portal_ip)

            portal_metadata = {
                "tpgs": len(self.tpg_list),
                "gateway_ip_list": self.gateway_ip_list,
                "portal_ip_address": self.active_portal_ip,
                "inactive_portal_ips": inactive_portal_ip}
            target_config['portals'][local_gw] = portal_metadata
            target_config['ip_list'] = self.gateway_ip_list

            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True
        else:
            # gateway already defined, so check that the IP list it has
            # matches the current request
            portal_details = target_config['portals'][local_gw]
            if portal_details['gateway_ip_list'] != self.gateway_ip_list:
                inactive_portal_ip = list(self.gateway_ip_list)
                inactive_portal_ip.remove(self.active_portal_ip)
                portal_details['gateway_ip_list'] = self.gateway_ip_list
                portal_details['tpgs'] = len(self.tpg_list)
                portal_details['inactive_portal_ips'] = \
                    inactive_portal_ip
                target_config['portals'][local_gw] = portal_details
                config.update_item("targets", self.iqn, target_config)
                self.config_updated = True

        if self.config_updated:
            config.commit()

    elif mode == 'map':

        if self.exists():
            self.load_config()
            self.map_luns(config)
        else:
            self.error = True
            self.error_msg = ("Attempted to map to a gateway '{}' that "
                              "hasn't been defined yet...out of order "
                              "steps?".format(self.iqn))

    elif mode == 'init':

        # init mode just creates the iscsi target definition and updates
        # the config object. It is used by the CLI only
        if self.exists():
            self.logger.info("GWTarget init request skipped - target "
                             "already exists")
        else:
            # create the target
            self.create_target()
            seed_target = {'disks': [],
                           'clients': {},
                           'portals': {},
                           'groups': {},
                           'controls': {}}
            config.add_item("targets", self.iqn, seed_target)
            config.commit()

            Discovery.set_discovery_auth_lio(
                config.config['discovery_auth']['chap'],
                config.config['discovery_auth']['chap_mutual'])

    elif mode == 'clearconfig':

        # Called by API from CLI clearconfig command
        if self.exists():
            self.load_config()
        else:
            self.error = True
            self.error_msg = "Target {} does not exist on {}".format(
                self.iqn, local_gw)
            return

        target_config = config.config["targets"][self.iqn]
        self.clear_config()

        if not self.error:
            if len(target_config['portals']) == 0:
                config.del_item('targets', self.iqn)
            else:
                gw_ip = target_config['portals'][local_gw][
                    'portal_ip_address']

                target_config['portals'].pop(local_gw)

                ip_list = target_config['ip_list']
                ip_list.remove(gw_ip)
                if len(ip_list) > 0 and len(
                        target_config['portals'].keys()) > 0:
                    config.update_item('targets', self.iqn,
                                       target_config)
                else:
                    # no more portals in the list, so delete the target
                    config.del_item('targets', self.iqn)

                remove_gateway = True
                for _, target in config.config["targets"].items():
                    if local_gw in target['portals']:
                        remove_gateway = False
                        break

                if remove_gateway:
                    # gateway is no longer used, so delete it
                    config.del_item('gateways', local_gw)

            config.commit()
def define_luns(gateway):
    """
    define the disks in the config to LIO
    :param gateway: (object) gateway object - used for mapping
    :return: None
    """
    local_gw = this_host()

    # sort the disks dict keys, so the disks are registered in a specific
    # sequence
    disks = config.config['disks']
    srtd_disks = sorted(disks)
    pools = {disks[disk_key]['pool'] for disk_key in srtd_disks}

    if pools:
        with rados.Rados(conffile=settings.config.cephconf) as cluster:

            for pool in pools:

                logger.debug("Processing rbd's in '{}' pool".format(pool))

                with cluster.open_ioctx(pool) as ioctx:

                    pool_disks = [disk_key for disk_key in srtd_disks
                                  if disk_key.startswith(pool)]
                    for disk_key in pool_disks:

                        pool, image_name = disk_key.split('.')

                        try:
                            with rbd.Image(ioctx,
                                           image_name) as rbd_image:
                                image_bytes = rbd_image.size()
                                image_size_h = human_size(image_bytes)

                                lun = LUN(logger, pool, image_name,
                                          image_size_h, local_gw)
                                if lun.error:
                                    halt("Error defining rbd image "
                                         "{}".format(disk_key))

                                lun.allocate()

                                if lun.error:
                                    halt("Error unable to register {} "
                                         "with LIO - {}".format(
                                             disk_key, lun.error_msg))

                        except rbd.ImageNotFound:
                            halt("Disk '{}' defined to the config, but "
                                 "image '{}' can not be found in "
                                 "'{}' pool".format(disk_key, image_name,
                                                    pool))

        # Gateway Mapping : Map the LUNs registered to all tpgs within
        # the LIO target
        gateway.manage('map')
        if gateway.error:
            halt("Error mapping the LUNs to the tpgs within the iscsi "
                 "Target")
    else:
        logger.info("No LUNs to export")
def __init__(self, logger, config):
    self.logger = logger
    self.config = config

    self.hostname = this_host()
def __init__(self):
    self.metrics = {}
    self._root = RTSRoot()

    # use utils.this_host
    self.gw_name = this_host()