def test_upgrade_config(self):
    """
    Run the Config upgrade path against the seed (pre-upgrade) config and
    verify the result matches the expected latest-format config.
    """
    # serialise the seed config so the mocked rados read can return it
    gateway_conf_initial = json.dumps(self.gateway_conf_initial)
    # stub out rados I/O so Config() runs the upgrade purely in memory
    with mock.patch.object(Config, 'init_config', return_value=True), \
            mock.patch.object(Config, '_read_config_object',
                              return_value=gateway_conf_initial), \
            mock.patch.object(Config, 'commit'):
        config = Config(self.logger)
    self.maxDiff = None
    iqn = 'iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw'
    # created/updated are fresh timestamps written during the upgrade, so
    # they can only be checked relatively; pin them to fixed values below
    # so the final assertDictEqual can compare the rest of the structure
    self.assertGreater(
        config.config['targets'][iqn]['created'],
        self.gateway_conf_latest['targets'][iqn]['created'])
    self.assertGreater(
        config.config['targets'][iqn]['updated'],
        self.gateway_conf_latest['targets'][iqn]['updated'])
    config.config['targets'][iqn]['created'] = '2018/12/07 09:19:01'
    config.config['targets'][iqn]['updated'] = '2018/12/07 09:19:02'
    disk = 'rbd/disk_1'
    self.assertGreater(
        config.config['disks'][disk]['updated'],
        self.gateway_conf_latest['disks'][disk]['updated'])
    config.config['disks'][disk]['updated'] = '2018/12/07 09:19:03'
    self.assertDictEqual(config.config, self.gateway_conf_latest)
def __init__(self, logger, group_name, members=None, disks=None):
    """
    Manage a host group definition. The input for the group object is the
    desired state of the group where the logic enforced produces an
    idempotent group definition across API/CLI and more importantly
    Ansible

    :param logger: (logging object) used for centralised logging
    :param group_name: (str) group name
    :param members: (list) iscsi IQN's of the clients
    :param disks: (list) disk names of the format pool.image
    """
    self.logger = logger
    self.error = False
    self.error_msg = ''
    self.num_changes = 0

    self.config = Config(logger)
    if self.config.error:
        self.error = self.config.error
        self.error_msg = self.config.error_msg
        return

    # check that the config object has a group section
    Group._check_config(self.logger, self.config)

    self.group_name = group_name
    # FIX: the defaults were mutable ([]) which Python creates once and
    # shares across every call, so separate Group instances could end up
    # mutating the same list. Use None sentinels and build fresh lists.
    self.group_members = members if members is not None else []
    self.disks = disks if disks is not None else []

    self.logger.debug("Group : name={}".format(self.group_name))
    self.logger.debug("Group : members={}".format(self.group_members))
    self.logger.debug("Group : disks={}".format(self.disks))
def __init__(self, logger, pool, image, size, allocating_host):
    """
    Represent a LUN request for a specific rbd pool/image.

    :param logger: (logging object) used for centralised logging
    :param pool: (str) rbd pool name
    :param image: (str) rbd image name
    :param size: requested image size
    :param allocating_host: (str) host (fqdn or shortname) making the
           allocation request
    """
    self.logger = logger
    self.pool = pool
    self.image = image
    self.pool_id = 0
    self.size = size
    self.config_key = '{}.{}'.format(pool, image)

    # the config object stores gateways by shortname only, so strip any
    # domain component from the allocating host name
    self.allocating_host = allocating_host.split('.')[0]

    # gateway host that owns the preferred path for this LUN
    self.owner = ''

    self.error = False
    self.error_msg = ''
    self.num_changes = 0

    # device-mapper path, e.g. /dev/mapper/0-58f8b515f007c
    self.dm_device = ''

    self.config = Config(logger)
    if self.config.error:
        self.error = self.config.error
        self.error_msg = self.config.error_msg
        return

    self._validate_request()
def __init__(self, logger, iqn, gateway_ip_list, enable_portal=True):
    """
    Instantiate the class
    :param logger: (logging object) used for centralised logging
    :param iqn: iscsi iqn name for the gateway
    :param gateway_ip_list: list of IP addresses to be defined as portals
           to LIO
    :param enable_portal: (bool) whether portal IPs should be created on
           the TPGs
    :return: gateway object
    """
    self.error = False
    self.error_msg = ''

    self.enable_portal = enable_portal  # boolean to trigger portal
    # IP creation
    self.logger = logger  # logger object

    self.iqn = iqn

    # If the ip list received has data in it, this is a target we need to
    # act on the IP's provided, otherwise just set to null
    if gateway_ip_list:
        # if the ip list provided doesn't match any ip of this host, abort
        # the assumption here is that we'll only have one matching ip in
        # the list!
        matching_ip = set(gateway_ip_list).intersection(ipv4_addresses())
        if len(list(matching_ip)) == 0:
            self.error = True
            self.error_msg = ("gateway IP addresses provided do not match"
                              " any ip on this host")
            return

        self.active_portal_ip = list(matching_ip)[0]
        self.logger.debug("active portal will use "
                          "{}".format(self.active_portal_ip))

        self.gateway_ip_list = gateway_ip_list
        self.logger.debug("tpg's will be defined in this order"
                          " - {}".format(self.gateway_ip_list))
    else:
        # without gateway_ip_list passed in this is a 'init' or
        # 'clearconfig' request
        self.gateway_ip_list = []
        self.active_portal_ip = []

    self.changes_made = False
    self.config_updated = False

    self.target = None
    self.tpg = None
    self.tpg_list = []

    self.config = Config(self.logger)
    if self.config.error:
        self.error = self.config.error
        self.error_msg = self.config.error_msg
        # FIX: bail out here like the other __init__ methods do.
        # Previously execution fell through and dereferenced
        # self.config.config, which would raise on a failed Config load
        # instead of leaving the caller a clean error flag to check.
        return

    self.controls = self.config.config.get('controls', {}).copy()
    self._add_properies()
def __init__(self, logger, pool, image, size, allocating_host):
    """
    Define a LUN request for a given rbd pool/image, loading any existing
    per-image control overrides from the config object.

    :param logger: (logging object) used for centralised logging
    :param pool: (str) rbd pool name
    :param image: (str) rbd image name
    :param size: requested image size (human readable, e.g. '2G')
    :param allocating_host: (str) host (fqdn or shortname) making the
           allocation request
    """
    self.logger = logger
    self.pool = pool
    self.image = image
    self.pool_id = 0
    self.size = size
    self.size_bytes = convert_2_bytes(size)
    self.config_key = '{}.{}'.format(pool, image)

    # per-image tuning overrides; refreshed from the config object below
    self.controls = {}

    # the config object records gateways by shortname only, so drop any
    # domain component from the allocating host
    self.allocating_host = allocating_host.split('.')[0]

    # gateway that owns the preferred path for this LUN
    self.owner = ''

    self.error = False
    self.error_msg = ''
    self.num_changes = 0

    self.config = Config(logger)
    if self.config.error:
        self.error = self.config.error
        self.error_msg = self.config.error_msg
        return

    self._validate_request()

    disks = self.config.config['disks']
    if self.config_key in disks:
        self.controls = disks[self.config_key].get('controls', {}).copy()
def ansible_main():
    """
    Configure the gateway on the host. All images defined are added to
    the default tpg for later allocation to clients.

    Run modes:
      target - define the iSCSI target/TPGs in LIO
      map    - add the required LUNs to the existing TPG
    """
    fields = {
        "gateway_iqn": {
            "required": True,
            "type": "str"
        },
        "gateway_ip_list": {
            "required": True
        },  # "type": "list"},
        "mode": {
            "required": True,
            "choices": ['target', 'map']
        }
    }

    module = AnsibleModule(
        argument_spec=fields,  # noqa: F405
        supports_check_mode=False)

    cfg = Config(logger)
    # FIX: the Config object was used without checking its error flag,
    # so a failed rados read would crash on cfg.config below instead of
    # producing a clean module failure (every other caller checks this).
    if cfg.error:
        module.fail_json(msg="Unable to open/read the configuration "
                             "object - {}".format(cfg.error_msg))

    if cfg.config['version'] > 3:
        module.fail_json(msg="Unsupported iscsigws.yml/iscsi-gws.yml setting "
                             "detected. Remove depreciated iSCSI target, LUN, "
                             "client, and gateway settings from "
                             "iscsigws.yml/iscsi-gws.yml. See "
                             "iscsigws.yml.sample for list of supported "
                             "settings")

    gateway_iqn = module.params['gateway_iqn']
    gateway_ip_list = module.params['gateway_ip_list'].split(',')
    mode = module.params['mode']

    # basic reachability check of the requested portal addresses
    if not valid_ip(gateway_ip_list):
        module.fail_json(msg="Invalid gateway IP address(es) provided - port "
                             "22 check failed ({})".format(gateway_ip_list))

    logger.info("START - GATEWAY configuration started - mode {}".format(mode))

    gateway = GWTarget(logger, gateway_iqn, gateway_ip_list)
    if gateway.error:
        logger.critical("(ansible_main) Gateway init failed - "
                        "{}".format(gateway.error_msg))
        module.fail_json(msg="iSCSI gateway initialisation failed "
                             "({})".format(gateway.error_msg))

    gateway.manage(mode)
    if gateway.error:
        logger.critical("(main) Gateway creation or load failed, "
                        "unable to continue")
        module.fail_json(msg="iSCSI gateway creation/load failure "
                             "({})".format(gateway.error_msg))

    logger.info("END - GATEWAY configuration complete")
    module.exit_json(changed=gateway.changes_made,
                     meta={"msg": "Gateway setup complete"})
def test_upgrade_config(self): gateway_conf_initial = json.dumps(self.gateway_conf_initial) # First, the upgrade is executed on node1 with mock.patch.object(Config, 'init_config', return_value=True), \ mock.patch.object(Config, '_read_config_object', return_value=gateway_conf_initial), \ mock.patch.object(Config, 'commit'), \ mock.patch("socket.gethostname", return_value='node1'), \ mock.patch("socket.getfqdn", return_value='node1.ceph.local'): config = Config(self.logger) # And then, the upgrade is executed on node2 current_config = json.dumps(config.config) with mock.patch.object(Config, 'init_config', return_value=True), \ mock.patch.object(Config, '_read_config_object', return_value=current_config), \ mock.patch.object(Config, 'commit'), \ mock.patch("socket.gethostname", return_value='node2'),\ mock.patch("socket.getfqdn", return_value='node2.ceph.local'): config = Config(self.logger) self.maxDiff = None iqn = 'iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw' self.assertGreater( config.config['targets'][iqn]['created'], self.gateway_conf_latest['targets'][iqn]['created']) self.assertGreater( config.config['targets'][iqn]['updated'], self.gateway_conf_latest['targets'][iqn]['updated']) config.config['targets'][iqn]['created'] = '2018/12/07 09:19:01' config.config['targets'][iqn]['updated'] = '2018/12/07 09:19:02' disk = 'rbd/disk_1' self.assertGreater( config.config['disks'][disk]['updated'], self.gateway_conf_latest['disks'][disk]['updated']) config.config['disks'][disk]['updated'] = '2018/12/07 09:19:03' self.assertDictEqual(config.config, self.gateway_conf_latest)
def __init__(self, cfg_type, cfg_type_key, logger, control_settings):
    """
    Initialise an object that exposes tunable 'controls' stored in the
    rados config object.

    :param cfg_type: (str) config section the controls live under
    :param cfg_type_key: (str) key within that section for this object
    :param logger: (logging object) used for centralised logging
    :param control_settings: settings used to build the control
           properties
    :raises CephiSCSIError: when the config object cannot be loaded
    """
    self.cfg_type = cfg_type
    self.cfg_type_key = cfg_type_key
    self.logger = logger

    cfg = Config(logger)
    self.config = cfg
    if cfg.error:
        raise CephiSCSIError(cfg.error_msg)

    # Work against a private copy of the stored controls; nothing is
    # written back until commit is called. To update the kernel call the
    # child object's update function.
    current_controls = self._get_config_controls()
    self.controls = current_controls.copy()
    self._add_properies(control_settings)
def __init__(self, logger, target_iqn, group_name, members=None,
             disks=None):
    """
    Manage a host group definition. The input for the group object is the
    desired state of the group where the logic enforced produces an
    idempotent group definition across API/CLI and more importantly
    Ansible

    :param logger: (logging object) used for centralised logging
    :param target_iqn: (str) target iqn
    :param group_name: (str) group name
    :param members: (list) iscsi IQN's of the clients
    :param disks: (list) disk names of the format pool/image
    """
    self.logger = logger
    self.error = False
    self.error_msg = ''
    self.num_changes = 0

    self.config = Config(logger)
    if self.config.error:
        self.error = self.config.error
        self.error_msg = self.config.error_msg
        return

    self.target_iqn = target_iqn
    self.group_name = group_name
    # FIX: the defaults were mutable ([]) which Python creates once and
    # shares across every call, so separate Group instances could end up
    # mutating the same member/disk lists. Use None sentinels instead.
    self.group_members = members if members is not None else []
    self.disks = disks if disks is not None else []

    target_config = self.config.config['targets'][self.target_iqn]
    # flag whether this request creates a new group or updates an
    # existing one
    if group_name in target_config['groups']:
        self.new_group = False
    else:
        self.new_group = True

    self.logger.debug("Group : name={}".format(self.group_name))
    self.logger.debug("Group : members={}".format(self.group_members))
    self.logger.debug("Group : disks={}".format(self.disks))
def ansible_main():
    """
    Purge iSCSI gateway configuration from this host. In 'gateway' mode
    the gateway/target definition is removed; in 'disks' mode the rbd
    images registered in the config object are removed.
    """
    fields = {
        "mode": {
            "required": True,
            "type": "str",
            "choices": ["gateway", "disks"]
        }
    }

    module = AnsibleModule(
        argument_spec=fields,  # noqa F405
        supports_check_mode=False)

    run_mode = module.params['mode']
    changes_made = False

    logger.info("START - GATEWAY configuration PURGE started, run mode "
                "is {}".format(run_mode))
    cfg = Config(logger)
    #
    # Purge gateway configuration, if the config has gateways
    if run_mode == 'gateway':
        changes_made = delete_gateway_config(cfg, module)
    elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0:
        #
        # Remove the disks on this host, that have been registered in the
        # config object
        changes_made = delete_images(cfg)

    logger.info("END - GATEWAY configuration PURGE complete")

    module.exit_json(changed=changes_made,
                     meta={
                         "msg": "Purge of iSCSI settings ({}) "
                                "complete".format(run_mode)
                     })
def manage(self, mode):
    """
    Manage the definition of the gateway, given a mode of 'target',
    'map', 'init' or 'clearconfig'. In 'target' mode the LIO TPG is
    defined, whereas in map mode, the required LUNs are added to the
    existing TPG
    :param mode: run mode - target, map, init or clearconfig (str)
    :return: None - but sets the objects error flags to be checked by
             the caller
    """
    config = Config(self.logger)
    if config.error:
        self.error = True
        self.error_msg = config.error_msg
        return

    local_gw = this_host()

    if mode == 'target':

        if self.exists():
            self.load_config()
            self.check_tpgs()
        else:
            self.create_target()

        if self.error:
            # return to caller, with error state set
            return

        # apply the discovery auth (chap) settings from the config to LIO
        Discovery.set_discovery_auth_lio(
            config.config['discovery_auth']['chap'],
            config.config['discovery_auth']['chap_mutual'])

        target_config = config.config["targets"][self.iqn]
        gateway_group = config.config["gateways"].keys()
        if "ip_list" not in target_config:
            target_config['ip_list'] = self.gateway_ip_list
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        # sync any local control (tuning) overrides back to the config
        if self.controls != target_config.get('controls', {}):
            target_config['controls'] = self.controls.copy()
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if local_gw not in gateway_group:
            gateway_metadata = {"active_luns": 0}
            config.add_item("gateways", local_gw)
            config.update_item("gateways", local_gw, gateway_metadata)
            self.config_updated = True

        if local_gw not in target_config['portals']:
            # first time this gateway is seen for the target - register
            # its portal metadata
            inactive_portal_ip = list(self.gateway_ip_list)
            inactive_portal_ip.remove(self.active_portal_ip)
            portal_metadata = {
                "tpgs": len(self.tpg_list),
                "gateway_ip_list": self.gateway_ip_list,
                "portal_ip_address": self.active_portal_ip,
                "inactive_portal_ips": inactive_portal_ip
            }
            target_config['portals'][local_gw] = portal_metadata
            target_config['ip_list'] = self.gateway_ip_list
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True
        else:
            # gateway already defined, so check that the IP list it has
            # matches the current request
            portal_details = target_config['portals'][local_gw]
            if portal_details['gateway_ip_list'] != self.gateway_ip_list:
                inactive_portal_ip = list(self.gateway_ip_list)
                inactive_portal_ip.remove(self.active_portal_ip)
                portal_details['gateway_ip_list'] = self.gateway_ip_list
                portal_details['tpgs'] = len(self.tpg_list)
                portal_details['inactive_portal_ips'] = inactive_portal_ip
                target_config['portals'][local_gw] = portal_details
                config.update_item("targets", self.iqn, target_config)
                self.config_updated = True

        if self.config_updated:
            config.commit()

    elif mode == 'map':
        if self.exists():
            self.load_config()
            self.map_luns(config)
        else:
            self.error = True
            self.error_msg = ("Attempted to map to a gateway '{}' that "
                              "hasn't been defined yet...out of order "
                              "steps?".format(self.iqn))

    elif mode == 'init':
        # init mode just creates the iscsi target definition and updates
        # the config object. It is used by the CLI only
        if self.exists():
            self.logger.info("GWTarget init request skipped - target "
                             "already exists")
        else:
            # create the target
            self.create_target()
            # seed an empty per-target layout into the config object
            seed_target = {
                'disks': [],
                'clients': {},
                'portals': {},
                'groups': {},
                'controls': {}
            }
            config.add_item("targets", self.iqn, seed_target)
            config.commit()
            Discovery.set_discovery_auth_lio(
                config.config['discovery_auth']['chap'],
                config.config['discovery_auth']['chap_mutual'])

    elif mode == 'clearconfig':
        # Called by API from CLI clearconfig command
        if self.exists():
            self.load_config()
        else:
            self.error = True
            self.error_msg = "Target {} does not exist on {}".format(
                self.iqn, local_gw)
            return

        target_config = config.config["targets"][self.iqn]
        self.clear_config()

        if not self.error:
            if len(target_config['portals']) == 0:
                config.del_item('targets', self.iqn)
            else:
                # drop only this gateway's portal; the target may still
                # be served by other gateways
                gw_ip = target_config['portals'][local_gw][
                    'portal_ip_address']

                target_config['portals'].pop(local_gw)

                ip_list = target_config['ip_list']
                ip_list.remove(gw_ip)
                if len(ip_list) > 0 and len(
                        target_config['portals'].keys()) > 0:
                    config.update_item('targets', self.iqn, target_config)
                else:
                    # no more portals in the list, so delete the target
                    config.del_item('targets', self.iqn)

                # remove the gateway entry only if no remaining target
                # references it
                remove_gateway = True
                for _, target in config.config["targets"].items():
                    if local_gw in target['portals']:
                        remove_gateway = False
                        break

                if remove_gateway:
                    # gateway is no longer used, so delete it
                    config.del_item('gateways', local_gw)

            config.commit()
def ansible_main():
    """
    Purge iSCSI gateway configuration from this host. 'gateway' mode
    drops the LIO target and lun maps (plus shared config entries on the
    designated cleanup host); 'disks' mode deletes the rbd images this
    host owns according to the config object.
    """
    fields = {"mode": {"required": True,
                       "type": "str",
                       "choices": ["gateway", "disks"]
                       }
              }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    run_mode = module.params['mode']
    changes_made = False

    logger.info("START - GATEWAY configuration PURGE started, run mode "
                "is {}".format(run_mode))
    cfg = Config(logger)
    # config records gateways by shortname only
    this_host = socket.gethostname().split('.')[0]
    perform_cleanup_tasks = is_cleanup_host(cfg)

    #
    # Purge gateway configuration, if the config has gateways
    if run_mode == 'gateway' and len(cfg.config['gateways'].keys()) > 0:

        lio = LIO()
        gateway = Gateway(cfg)

        # refuse to purge while initiators are still logged in
        if gateway.session_count() > 0:
            module.fail_json(msg="Unable to purge - gateway still has active "
                                 "sessions")

        gateway.drop_target(this_host)
        if gateway.error:
            module.fail_json(msg=gateway.error_msg)

        lio.drop_lun_maps(cfg, perform_cleanup_tasks)
        if lio.error:
            module.fail_json(msg=lio.error_msg)

        if gateway.changed or lio.changed:

            # each gateway removes it's own entry from the config
            cfg.del_item("gateways", this_host)

            if perform_cleanup_tasks:
                cfg.reset = True

                # drop all client definitions from the configuration
                # object
                client_names = cfg.config["clients"].keys()
                for client in client_names:
                    cfg.del_item("clients", client)

                # remove the shared (non per-host) gateway keys
                cfg.del_item("gateways", "iqn")
                cfg.del_item("gateways", "created")
                cfg.del_item("gateways", "ip_list")

            cfg.commit()
            changes_made = True

    elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0:
        #
        # Remove the disks on this host, that have been registered in the
        # config object
        #
        # if the owner field for a disk is set to this host, this host
        # can safely delete it
        # nb. owner gets set at rbd allocation and mapping time
        images_left = []

        # delete_list will contain a list of pool/image names where the
        # owner is this host
        delete_list = [key.replace('.', '/', 1) for key in cfg.config['disks']
                       if cfg.config['disks'][key]['owner'] == this_host]

        if delete_list:
            images_left = delete_group(module, delete_list, cfg)

        # if the delete list still has entries we had problems deleting
        # the images
        if images_left:
            module.fail_json(msg="Problems deleting the following rbd's : "
                                 "{}".format(','.join(images_left)))

        changes_made = cfg.changed

    logger.debug("ending lock state variable {}".format(cfg.config_locked))

    logger.info("END - GATEWAY configuration PURGE complete")

    module.exit_json(changed=changes_made,
                     meta={"msg": "Purge of iSCSI settings ({}) "
                                  "complete".format(run_mode)})
def manage(self, rqst_type):
    """
    Manage the allocation or removal of this client
    :param rqst_type is either present (try and create the nodeACL), or
    absent - delete the nodeACL

    Under ansible, config-object writes are restricted to a single
    'update host' so concurrent playbook runs across gateways don't race
    on the shared rados config object; outside ansible (API/CLI) the
    local host commits directly.
    """
    # Build a local object representing the rados configuration object
    config_object = Config(self.logger)
    if config_object.error:
        self.error = True
        self.error_msg = config_object.error_msg
        return

    # use current config to hold a copy of the current rados config
    # object (dict)
    self.current_config = config_object.config

    running_under_ansible = ansible_control()
    self.logger.debug("(GWClient.manage) running under ansible? {}".format(
        running_under_ansible))

    if running_under_ansible:
        update_host = GWClient.get_update_host(self.current_config)
        self.logger.debug(
            "GWClient.manage) update host to handle any config update is {}"
            .format(update_host))
    else:
        update_host = None

    if rqst_type == "present":

        ###############################################################
        # Ensure the client exists in LIO                             #
        ###############################################################

        # first look at the request to see if it matches the settings
        # already in the config object - if so this is just a rerun, or a
        # reboot so config object updates are not needed when we change
        # the LIO environment
        if self.iqn in self.current_config['clients'].keys():
            self.metadata = self.current_config['clients'][self.iqn]
            config_image_list = sorted(self.metadata['luns'].keys())

            #
            # Does the request match the current config?
            if self.chap == self.metadata['auth']['chap'] and \
                    config_image_list == sorted(self.requested_images):
                self.commit_enabled = False
        else:
            # requested iqn is not in the config object
            if running_under_ansible:
                # only the designated update host seeds the config entry
                if update_host == gethostname().split('.')[0]:
                    self.seed_config(config_object)
            else:
                # not ansible, so just run the command
                self.seed_config(config_object)

            self.metadata = GWClient.seed_metadata

        self.logger.debug(
            "(manage) config updates to be applied from this host: {}".format(
                self.commit_enabled))

        self.define_client()
        if self.error:
            # unable to define the client!
            return

        bad_images = self.validate_images()
        if not bad_images:

            self.setup_luns()
            if self.error:
                return

            # chap credentials are stored as 'user/password'; no '/'
            # means no security was requested for this client
            if '/' in self.chap:
                self.configure_auth('chap', self.chap)
                if self.error:
                    return
            else:
                self.logger.warning(
                    "(main) client '{}' configured without security".
                    format(self.iqn))
        else:
            # request for images to map to this client that haven't been
            # added to LIO yet!
            self.error = True
            self.error_msg = "Non-existent images {} requested for {}".format(
                bad_images, self.iqn)
            return

        # check the client object's change count, and update the config
        # object if this is the updating host
        if self.change_count > 0:

            if self.commit_enabled:

                if running_under_ansible:
                    if update_host == gethostname().split('.')[0]:
                        # update the config object with this clients
                        # settings
                        config_object.update_item("clients",
                                                  self.iqn,
                                                  self.metadata)

                        # persist the config update
                        config_object.commit()
                else:
                    # this was a request directly (over API?)
                    config_object.update_item("clients",
                                              self.iqn,
                                              self.metadata)

                    # persist the config update
                    config_object.commit()

    else:

        ###############################################################
        # Remove the requested client from the config object and LIO  #
        ###############################################################
        if self.exists():
            self.define_client()  # grab the client and parent tpg objects
            self.delete()  # deletes from the local LIO instance
            if self.error:
                return
            else:
                # remove this client from the config
                if running_under_ansible:
                    if update_host == gethostname().split('.')[0]:
                        self.logger.debug(
                            "Removing {} from the config object".format(
                                self.iqn))
                        config_object.del_item("clients", self.iqn)
                        config_object.commit()
                else:
                    config_object.del_item("clients", self.iqn)
                    config_object.commit()
        else:
            # desired state is absent, but the client does not exist in
            # LIO - Nothing to do!
            self.logger.info(
                "(main) client {} removal request, but the client is not "
                "defined to LIO...skipping".format(self.iqn))
def manage(self, mode):
    """
    Manage the definition of the gateway, given a mode of 'target',
    'map', 'init' or 'clearconfig'. In 'target' mode the LIO TPG is
    defined, whereas in map mode, the required LUNs are added to the
    existing TPG
    :param mode: run mode - target, map, init or clearconfig (str)
    :return: None - but sets the objects error flags to be checked by
             the caller
    """
    config = Config(self.logger)
    if config.error:
        self.error = True
        self.error_msg = config.error_msg
        return

    local_gw = this_host()

    if mode == 'target':

        if self.exists():
            self.load_config()
            self.check_tpgs()
        else:
            self.create_target()

        if self.error:
            # return to caller, with error state set
            return

        target_config = config.config["targets"][self.iqn]
        self.update_acl(target_config['acl_enabled'])

        # apply the stored discovery auth settings to LIO
        discovery_auth_config = config.config['discovery_auth']
        Discovery.set_discovery_auth_lio(
            discovery_auth_config['username'],
            discovery_auth_config['password'],
            discovery_auth_config['password_encryption_enabled'],
            discovery_auth_config['mutual_username'],
            discovery_auth_config['mutual_password'],
            discovery_auth_config['mutual_password_encryption_enabled'])

        gateway_group = config.config["gateways"].keys()
        if "ip_list" not in target_config:
            target_config['ip_list'] = self.gateway_ip_list
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        # sync any local control (tuning) overrides back to the config
        if self.controls != target_config.get('controls', {}):
            target_config['controls'] = self.controls.copy()
            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if local_gw not in gateway_group:
            gateway_metadata = {"active_luns": 0}
            config.add_item("gateways", local_gw)
            config.update_item("gateways", local_gw, gateway_metadata)
            self.config_updated = True

        if local_gw not in target_config['portals']:
            # Update existing gws with the new gw
            for remote_gw, remote_gw_config in target_config[
                    'portals'].items():
                if remote_gw_config[
                        'gateway_ip_list'] == self.gateway_ip_list:
                    continue

                # recompute the remote gateway's inactive portal set
                # against the new full ip list
                inactive_portal_ip = list(self.gateway_ip_list)
                for portal_ip_address in remote_gw_config[
                        "portal_ip_addresses"]:
                    inactive_portal_ip.remove(portal_ip_address)
                remote_gw_config['gateway_ip_list'] = self.gateway_ip_list
                remote_gw_config['tpgs'] = len(self.tpg_list)
                remote_gw_config[
                    'inactive_portal_ips'] = inactive_portal_ip
                target_config['portals'][remote_gw] = remote_gw_config

            # Add the new gw
            inactive_portal_ip = list(self.gateway_ip_list)
            for active_portal_ip in self.active_portal_ips:
                inactive_portal_ip.remove(active_portal_ip)

            portal_metadata = {
                "tpgs": len(self.tpg_list),
                "gateway_ip_list": self.gateway_ip_list,
                "portal_ip_addresses": self.active_portal_ips,
                "inactive_portal_ips": inactive_portal_ip
            }
            target_config['portals'][local_gw] = portal_metadata
            target_config['ip_list'] = self.gateway_ip_list

            config.update_item("targets", self.iqn, target_config)
            self.config_updated = True

        if self.config_updated:
            config.commit()

    elif mode == 'map':

        if self.exists():

            self.load_config()

            self.map_luns(config)

            target_config = config.config["targets"][self.iqn]
            self.update_acl(target_config['acl_enabled'])

        else:
            self.error = True
            self.error_msg = ("Attempted to map to a gateway '{}' that "
                              "hasn't been defined yet...out of order "
                              "steps?".format(self.iqn))

    elif mode == 'init':

        # init mode just creates the iscsi target definition and updates
        # the config object. It is used by the CLI only
        if self.exists():
            self.logger.info("GWTarget init request skipped - target "
                             "already exists")

        else:
            # create the target
            self.create_target()
            # if error happens, we should never store this target to
            # config
            if self.error:
                return

            # seed an empty per-target layout into the config object
            seed_target = {
                'disks': {},
                'clients': {},
                'acl_enabled': True,
                'auth': {
                    'username': '',
                    'password': '',
                    'password_encryption_enabled': False,
                    'mutual_username': '',
                    'mutual_password': '',
                    'mutual_password_encryption_enabled': False
                },
                'portals': {},
                'groups': {},
                'controls': {}
            }
            config.add_item("targets", self.iqn, seed_target)
            config.commit()

            discovery_auth_config = config.config['discovery_auth']
            Discovery.set_discovery_auth_lio(
                discovery_auth_config['username'],
                discovery_auth_config['password'],
                discovery_auth_config['password_encryption_enabled'],
                discovery_auth_config['mutual_username'],
                discovery_auth_config['mutual_password'],
                discovery_auth_config['mutual_password_encryption_enabled']
            )

    elif mode == 'clearconfig':

        # Called by API from CLI clearconfig command
        if self.exists():
            self.load_config()
            self.clear_config(config)

        if self.error:
            return

        target_config = config.config["targets"][self.iqn]
        if len(target_config['portals']) == 0:
            config.del_item('targets', self.iqn)
        else:
            # drop only this gateway's portal; the target may still be
            # served by other gateways
            gw_ips = target_config['portals'][local_gw][
                'portal_ip_addresses']

            target_config['portals'].pop(local_gw)

            ip_list = target_config['ip_list']
            for gw_ip in gw_ips:
                ip_list.remove(gw_ip)
            if len(ip_list) > 0 and len(
                    target_config['portals'].keys()) > 0:
                config.update_item('targets', self.iqn, target_config)
            else:
                # no more portals in the list, so delete the target
                config.del_item('targets', self.iqn)

            # remove the gateway entry only if no remaining target
            # references it
            remove_gateway = True
            for _, target in config.config["targets"].items():
                if local_gw in target['portals']:
                    remove_gateway = False
                    break

            if remove_gateway:
                # gateway is no longer used, so delete it
                config.del_item('gateways', local_gw)

        config.commit()
def manage(self, mode):
    """
    Manage the definition of the gateway, given a mode of 'target' or
    'map'. In 'target' mode the LIO TPG is defined, whereas in map mode,
    the required LUNs are added to the existing TPG
    :param mode: run mode - target or map (str)
    :return: None - but sets the objects error flags to be checked by
             the caller
    """
    config = Config(self.logger)
    if config.error:
        self.error = True
        self.error_msg = config.error_msg
        return

    if mode == 'target':
        if self.exists():
            self.load_config()
            self.check_tpgs()
        else:
            self.create_target()

        if self.error:
            # return to caller, with error state set
            return

        # ensure that the config object has an entry for this gateway
        this_host = socket.gethostname().split('.')[0]

        gateway_group = config.config["gateways"].keys()

        # this action could be carried out by multiple nodes
        # concurrently, but since the value is the same (i.e all gateway
        # nodes use the same iqn) it's not worth worrying about!
        if "iqn" not in gateway_group:
            self.config_updated = True
            config.add_item("gateways", "iqn", initial_value=self.iqn)

        if "ip_list" not in gateway_group:
            self.config_updated = True
            config.add_item("gateways", "ip_list",
                            initial_value=self.gateway_ip_list)

        if this_host not in gateway_group:
            inactive_portal_ip = list(self.gateway_ip_list)
            inactive_portal_ip.remove(self.active_portal_ip)
            gateway_metadata = {
                "portal_ip_address": self.active_portal_ip,
                "iqn": self.iqn,
                "active_luns": 0,
                "tpgs": len(self.tpg_list),
                "inactive_portal_ips": inactive_portal_ip,
                "gateway_ip_list": self.gateway_ip_list
            }

            config.add_item("gateways", this_host)
            config.update_item("gateways", this_host, gateway_metadata)
            self.config_updated = True
        else:
            # gateway already defined, so check that the IP list it has
            # matches the current request
            gw_details = config.config['gateways'][this_host]
            # FIX: cmp() is a Python 2-only builtin (removed in Python
            # 3); a direct inequality test is equivalent for this
            # "lists differ" check and works on both versions
            if gw_details['gateway_ip_list'] != self.gateway_ip_list:
                inactive_portal_ip = list(self.gateway_ip_list)
                inactive_portal_ip.remove(self.active_portal_ip)
                gw_details['tpgs'] = len(self.tpg_list)
                gw_details['gateway_ip_list'] = self.gateway_ip_list
                gw_details['inactive_portal_ips'] = inactive_portal_ip
                config.update_item('gateways', this_host, gw_details)
                self.config_updated = True

        if self.config_updated:
            config.commit()

    elif mode == 'map':
        if self.exists():
            self.load_config()
            self.map_luns(config)
        else:
            self.error = True
            self.error_msg = (
                "Attempted to map to a gateway '{}' that hasn't been defined yet..."
                "out of order steps?".format(self.iqn))
# syslog carries only the bare message text
syslog_format = logging.Formatter("%(message)s")
syslog_handler.setFormatter(syslog_format)

# file target - more verbose logging for diagnostics
file_handler = RotatingFileHandler(
    '/var/log/rbd-target-gw/rbd-target-gw.log',
    maxBytes=5242880,   # rotate at 5 MiB, keep 7 backups
    backupCount=7)
file_handler.setLevel(logging.DEBUG)
file_format = logging.Formatter(
    "%(asctime)s [%(levelname)8s] - %(message)s")
file_handler.setFormatter(file_format)

logger.addHandler(syslog_handler)
logger.addHandler(file_handler)

# config_loading is defined in the outer-scope allowing it to be used as
# a flag to indicate when the apply_config function is running to prevent
# multiple reloads from being triggered concurrently
config_loading = False

settings.init()

# config is set in the outer scope, so it's easily accessible to the
# api classes
config = Config(logger)

if config.error:
    halt("Unable to open/read the configuration object")
else:
    main()
def manage(self, mode):
    """
    Manage the definition of the gateway, given a mode of 'target', 'map',
    'init' or 'clearconfig'. In 'target' mode the LIO TPG is defined,
    whereas in map mode, the required LUNs are added to the existing TPG

    :param mode: run mode - target, map, init or clearconfig (str)
    :return: None - but sets the object's error/error_msg flags to be
             checked by the caller
    """
    # Fresh view of the shared rados configuration object
    config = Config(self.logger)
    if config.error:
        self.error = True
        self.error_msg = config.error_msg
        return

    # this gateway's key in the config object (short hostname)
    local_gw = this_host()

    if mode == 'target':

        if self.exists():
            self.load_config()
            self.check_tpgs()
        else:
            self.create_target()

        if self.error:
            # return to caller, with error state set
            return

        gateway_group = config.config["gateways"].keys()

        # this action could be carried out by multiple nodes concurrently,
        # but since the value is the same (i.e all gateway nodes use the
        # same iqn) it's not worth worrying about!
        if "iqn" not in gateway_group:
            self.config_updated = True
            config.add_item("gateways", "iqn", initial_value=self.iqn)
        if "ip_list" not in gateway_group:
            self.config_updated = True
            config.add_item("gateways", "ip_list",
                            initial_value=self.gateway_ip_list)

        if local_gw not in gateway_group:
            # first definition of this gateway - record its portal metadata
            inactive_portal_ip = list(self.gateway_ip_list)
            inactive_portal_ip.remove(self.active_portal_ip)

            gateway_metadata = {"portal_ip_address": self.active_portal_ip,
                                "iqn": self.iqn,
                                "active_luns": 0,
                                "tpgs": len(self.tpg_list),
                                "inactive_portal_ips": inactive_portal_ip,
                                "gateway_ip_list": self.gateway_ip_list}

            config.add_item("gateways", local_gw)
            config.update_item("gateways", local_gw, gateway_metadata)
            config.update_item("gateways", "ip_list", self.gateway_ip_list)
            self.config_updated = True
        else:
            # gateway already defined, so check that the IP list it has
            # matches the current request
            gw_details = config.config['gateways'][local_gw]
            # NOTE: was "cmp(...) != 0" - cmp() is Python 2 only and was
            # removed in Python 3; direct list inequality is equivalent here
            if gw_details['gateway_ip_list'] != self.gateway_ip_list:
                inactive_portal_ip = list(self.gateway_ip_list)
                inactive_portal_ip.remove(self.active_portal_ip)
                gw_details['tpgs'] = len(self.tpg_list)
                gw_details['gateway_ip_list'] = self.gateway_ip_list
                gw_details['inactive_portal_ips'] = inactive_portal_ip
                config.update_item('gateways', local_gw, gw_details)
                self.config_updated = True

        if self.config_updated:
            config.commit()

    elif mode == 'map':

        if self.exists():
            self.load_config()
            self.map_luns(config)
        else:
            # map must follow target definition - flag the ordering problem
            self.error = True
            self.error_msg = ("Attempted to map to a gateway '{}' that "
                              "hasn't been defined yet...out of order "
                              "steps?".format(self.iqn))

    elif mode == 'init':

        # init mode just creates the iscsi target definition and updates
        # the config object. It is used by the CLI only
        if self.exists():
            self.logger.info("GWTarget init request skipped - target "
                             "already exists")
        else:
            # create the target
            self.create_target()
            current_iqn = config.config['gateways'].get('iqn', '')

            # First gateway asked to create the target will update the
            # config object
            if not current_iqn:
                config.add_item("gateways", "iqn", initial_value=self.iqn)
                config.commit()

    elif mode == 'clearconfig':

        # Called by API from CLI clearconfig command
        if self.exists():
            self.load_config()
        else:
            self.error = True
            self.error_msg = "IQN provided does not exist"

        # NOTE(review): clear_config runs even when the error flag was just
        # set above - presumably it is a safe no-op in that case; confirm
        self.clear_config()

        if not self.error:
            gw_ip = config.config['gateways'][local_gw]['portal_ip_address']

            config.del_item('gateways', local_gw)

            # drop this gateway's portal IP from the shared ip_list
            ip_list = config.config['gateways']['ip_list']
            ip_list.remove(gw_ip)
            if len(ip_list) > 0:
                config.update_item('gateways', 'ip_list', ip_list)
            else:
                # no more gateways in the list, so delete remaining items
                config.del_item('gateways', 'ip_list')
                config.del_item('gateways', 'iqn')
                config.del_item('gateways', 'created')

            config.commit()
def manage(self, rqst_type, committer=None):
    """
    Manage the allocation or removal of this client
    :param rqst_type is either 'present' (try and create the nodeACL), or
    'absent' - delete the nodeACL
    :param committer is the host responsible for any commits to the
    configuration - this is not needed for Ansible management, but is
    used by the CLI->API->GWClient interaction
    """
    # Build a local object representing the rados configuration object
    config_object = Config(self.logger)
    if config_object.error:
        self.error = True
        self.error_msg = config_object.error_msg
        return

    # use current config to hold a copy of the current rados config
    # object (dict)
    self.current_config = config_object.config
    target_config = self.current_config['targets'][self.target_iqn]
    # the committer is the only host allowed to persist config changes
    update_host = committer

    self.logger.debug("GWClient.manage) update host to handle any config "
                      "update is {}".format(update_host))

    if rqst_type == "present":

        ###################################################################
        # Ensure the client exists in LIO                                 #
        ###################################################################

        # first look at the request to see if it matches the settings
        # already in the config object - if so this is just a rerun, or a
        # reboot so config object updates are not needed when we change
        # the LIO environment
        if self.iqn in target_config['clients'].keys():
            self.metadata = target_config['clients'][self.iqn]
            config_image_list = sorted(self.metadata['luns'].keys())

            #
            # Does the request match the current config?

            # decode the stored (possibly encrypted) chap credentials
            auth_config = self.metadata['auth']
            config_chap = CHAP(auth_config['username'],
                               auth_config['password'],
                               auth_config['password_encryption_enabled'])
            if config_chap.error:
                self.error = True
                self.error_msg = config_chap.error_msg
                return
            # extract the chap_mutual_str from the config object entry
            config_chap_mutual = CHAP(
                auth_config['mutual_username'],
                auth_config['mutual_password'],
                auth_config['mutual_password_encryption_enabled'])
            if config_chap_mutual.error:
                self.error = True
                self.error_msg = config_chap_mutual.error_msg
                return

            # request matches stored credentials and lun list -> no config
            # object commit needed for this run
            if self.username == config_chap.user and \
                    self.password == config_chap.password and \
                    self.mutual_username == config_chap_mutual.user and \
                    self.mutual_password == config_chap_mutual.password and \
                    config_image_list == sorted(self.requested_images):
                self.commit_enabled = False
        else:
            # requested iqn is not in the config object
            self.seed_config(config_object)
            self.metadata = GWClient.seed_metadata

        self.logger.debug("(manage) config updates to be applied from "
                          "this host: {}".format(self.commit_enabled))

        # capture existence BEFORE define_client creates the nodeACL
        client_exists = self.exists()
        self.define_client()
        if self.error:
            # unable to define the client!
            return

        if client_exists and self.metadata["group_name"]:
            # bypass setup_luns for existing clients that have an
            # associated host group
            pass
        else:
            # either the client didn't exist (new or boot time), or the
            # group_name is not defined so run setup_luns for this client
            disks_config = self.current_config['disks']
            bad_images = self.validate_images(disks_config)
            if not bad_images:
                self.setup_luns(disks_config)
                if self.error:
                    return
            else:
                # request for images to map to this client that haven't
                # been added to LIO yet!
                self.error = True
                self.error_msg = ("Non-existent images {} requested "
                                  "for {}".format(bad_images, self.iqn))
                return

        # warn (but allow) a client defined with no chap credentials at all
        if not self.username and not self.password and \
                not self.mutual_username and not self.mutual_password:
            self.logger.warning("(main) client '{}' configured without"
                                " security".format(self.iqn))

        self.configure_auth(self.username, self.password,
                            self.mutual_username, self.mutual_password,
                            target_config)
        if self.error:
            return

        # check the client object's change count, and update the config
        # object if this is the updating host
        if self.change_count > 0:

            if self.commit_enabled:

                if update_host == this_host():
                    # update the config object with this clients settings
                    self.logger.debug("Updating config object metadata "
                                      "for '{}'".format(self.iqn))
                    target_config['clients'][self.iqn] = self.metadata
                    config_object.update_item("targets", self.target_iqn,
                                              target_config)

                    # persist the config update
                    config_object.commit()

    elif rqst_type == 'reconfigure':
        # re-apply the LIO definition only; no config object changes
        self.define_client()

    else:
        ###################################################################
        # Remove the requested client from the config object and LIO     #
        ###################################################################
        if self.exists():
            self.define_client()   # grab the client and parent tpg objects
            self.delete()          # deletes from the local LIO instance
            if self.error:
                return
            else:
                # remove this client from the config
                if update_host == this_host():
                    self.logger.debug("Removing {} from the config "
                                      "object".format(self.iqn))
                    target_config['clients'].pop(self.iqn)
                    config_object.update_item("targets", self.target_iqn,
                                              target_config)
                    config_object.commit()

        else:
            # desired state is absent, but the client does not exist
            # in LIO - Nothing to do!
            self.logger.info("(main) client {} removal request, but it's"
                             "not in LIO...skipping".format(self.iqn))
def manage(self, rqst_type, committer=None):
    """
    Manage the allocation or removal of this client
    :param rqst_type is either 'present' (try and create the nodeACL), or
    'absent' - delete the nodeACL
    :param committer is the host responsible for any commits to the
    configuration - this is not needed for Ansible management, but is
    used by the CLI->API->GWClient interaction
    """
    # Build a local object representing the rados configuration object
    config_object = Config(self.logger)
    if config_object.error:
        self.error = True
        self.error_msg = config_object.error_msg
        return

    # use current config to hold a copy of the current rados config
    # object (dict)
    self.current_config = config_object.config

    running_under_ansible = ansible_control()
    self.logger.debug("(GWClient.manage) running under ansible?"
                      " {}".format(running_under_ansible))

    # under ansible the updating host is derived from the config itself;
    # under the CLI/API it is supplied by the caller as 'committer'
    if running_under_ansible:
        update_host = GWClient.get_update_host(self.current_config)
    else:
        update_host = committer

    self.logger.debug("GWClient.manage) update host to handle any config "
                      "update is {}".format(update_host))

    if rqst_type == "present":

        ###################################################################
        # Ensure the client exists in LIO                                 #
        ###################################################################

        # first look at the request to see if it matches the settings
        # already in the config object - if so this is just a rerun, or a
        # reboot so config object updates are not needed when we change
        # the LIO environment
        if self.iqn in self.current_config['clients'].keys():
            self.metadata = self.current_config['clients'][self.iqn]
            config_image_list = sorted(self.metadata['luns'].keys())

            #
            # Does the request match the current config?

            # extract the chap_str from the config object entry
            config_chap = CHAP(self.metadata['auth']['chap'])
            chap_str = config_chap.chap_str
            if config_chap.error:
                self.error = True
                self.error_msg = config_chap.error_msg
                return

            # request matches stored chap and lun list -> no config object
            # commit needed for this run
            if self.chap == chap_str and \
                    config_image_list == sorted(self.requested_images):
                self.commit_enabled = False
        else:
            # requested iqn is not in the config object
            if running_under_ansible:
                # only the updating host seeds the config (ansible runs the
                # play on every gateway concurrently)
                if update_host == gethostname().split('.')[0]:
                    self.seed_config(config_object)
            else:
                # not ansible, so just run the command
                self.seed_config(config_object)

            self.metadata = GWClient.seed_metadata

        self.logger.debug("(manage) config updates to be applied from "
                          "this host: {}".format(self.commit_enabled))

        self.define_client()
        if self.error:
            # unable to define the client!
            return

        # if a group name has been set on the client, we need to bypass
        # lun setup
        if not self.metadata["group_name"]:
            # no group_name, so the client is managed individually
            bad_images = self.validate_images()
            if not bad_images:
                self.setup_luns()
                if self.error:
                    return
            else:
                # request for images to map to this client that haven't
                # been added to LIO yet!
                self.error = True
                self.error_msg = ("Non-existent images {} requested "
                                  "for {}".format(bad_images, self.iqn))
                return

        # if '/' in self.chap:
        # warn (but allow) a client defined with no chap credentials
        if self.chap == '':
            self.logger.warning("(main) client '{}' configured without"
                                " security".format(self.iqn))

        self.configure_auth('chap', self.chap)
        if self.error:
            return

        # check the client object's change count, and update the config
        # object if this is the updating host
        if self.change_count > 0:

            if self.commit_enabled:

                if update_host == gethostname().split('.')[0]:
                    # update the config object with this clients settings
                    self.logger.debug("Updating config object metadata "
                                      "for '{}'".format(self.iqn))
                    config_object.update_item("clients", self.iqn,
                                              self.metadata)

                    # persist the config update
                    config_object.commit()

    else:
        ###################################################################
        # Remove the requested client from the config object and LIO     #
        ###################################################################
        if self.exists():
            self.define_client()   # grab the client and parent tpg objects
            self.delete()          # deletes from the local LIO instance
            if self.error:
                return
            else:
                # remove this client from the config
                if update_host == gethostname().split('.')[0]:
                    self.logger.debug("Removing {} from the config "
                                      "object".format(self.iqn))
                    config_object.del_item("clients", self.iqn)
                    config_object.commit()

        else:
            # desired state is absent, but the client does not exist
            # in LIO - Nothing to do!
            self.logger.info("(main) client {} removal request, but it's"
                             "not in LIO...skipping".format(self.iqn))