def __init__(self):
    # Caches mirroring the live rtslib/kernel target state.
    self.block_store = {}   # storage-object name -> block storage object
    self.target = {}        # target wwn -> nested lookup dict
    self.root = RTSRoot()
    self.iscsi = FabricModule('iscsi')
    self.mapped_luns = {}
    # Populate the caches from the current configuration.
    self.get_block_store_objects()
    self.get_targets()
def access_group_init_add(req, ag_name, init_id, init_type):
    """Add initiator init_id to access group ag_name (iSCSI only).

    Silently succeeds if the initiator is already a member; raises if
    the initiator belongs to another group or already has its own ACL.
    """
    if init_type != 'iscsi':
        raise TargetdError(TargetdError.NO_SUPPORT, "Only support iscsi")
    tpg = _get_iscsi_tpg()
    group = NodeACLGroup(tpg, ag_name)
    # Already a member of the requested group: nothing to do.
    if init_id in list(group.wwns):
        return
    # Initiator must not belong to any other access group.
    for other_group in tpg.node_acl_groups:
        if init_id in list(other_group.wwns):
            raise TargetdError(
                TargetdError.EXISTS_INITIATOR,
                "Requested init_id is used by other access group")
    # Initiator must not already have a standalone ACL on this TPG.
    if init_id in (acl.node_wwn for acl in tpg.node_acls):
        raise TargetdError(TargetdError.EXISTS_INITIATOR,
                           "Requested init_id is in use")
    group.add_acl(init_id)
    RTSRoot().save_to_file()
def refresh(self):
    """Rebuild this node's children from the 'user' backstore objects
    whose config handler prefix matches self.handler."""
    self._children = set([])
    for storage_object in RTSRoot().storage_objects:
        if storage_object.plugin != 'user':
            continue
        # config looks like "<handler>/<rest>"; take the prefix before
        # the first slash to identify the handler.
        slash = storage_object.config.find("/")
        if storage_object.config[:slash] == self.handler:
            # Constructing the UI node attaches it to this parent.
            ui_so = self.so_cls(storage_object, self)
def access_group_init_del(req, ag_name, init_id, init_type):
    """Remove initiator init_id from access group ag_name (iSCSI only).

    A non-member initiator is a silent no-op.
    """
    if init_type != 'iscsi':
        raise TargetdError(TargetdError.NO_SUPPORT, "Only support iscsi")
    tpg = _get_iscsi_tpg()
    group = NodeACLGroup(tpg, ag_name)
    # Not a member: nothing to remove.
    if init_id not in list(group.wwns):
        return
    group.remove_acl(init_id)
    RTSRoot().save_to_file()
def access_group_map_destroy(req, pool_name, vol_name, ag_name):
    """Remove the mapping between a volume and an access group.

    If the backing TPG LUN ends up unmapped from every access group and
    initiator, the LUN and its storage object are deleted as well.
    """
    tpg = _get_iscsi_tpg()
    node_acl_group = NodeACLGroup(tpg, ag_name)
    tpg_lun = _tpg_lun_of(tpg, pool_name, vol_name)
    # Snapshot the groups before deleting: removing a member while
    # iterating the live generator can skip entries or error out.
    for map_group in list(node_acl_group.mapped_lun_groups):
        if map_group.tpg_lun == tpg_lun:
            map_group.delete()
    if not any(tpg_lun.mapped_luns):
        # If LUN is not masked to any access group or initiator
        # remove LUN instance.
        lun_so = tpg_lun.storage_object
        tpg_lun.delete()
        lun_so.delete()
    RTSRoot().save_to_file()
def export_create(req, pool, vol, initiator_wwn, lun):
    """Export volume vol from pool to initiator_wwn as host LUN lun."""
    fm = FabricModule('iscsi')
    target = Target(fm, target_name)
    tpg = TPG(target, 1)
    tpg.enable = True
    tpg.set_attribute("authentication", '0')
    NetworkPortal(tpg, "0.0.0.0")
    node_acl = NodeACL(tpg, initiator_wwn)
    tpg_lun = _tpg_lun_of(tpg, pool, vol)
    # Only add the mapped LUN if an identical mapping does not exist.
    already_mapped = any(
        ml.mapped_lun == lun and ml.parent_nodeacl == node_acl
        for ml in tpg_lun.mapped_luns)
    if not already_mapped:
        MappedLUN(node_acl, lun, tpg_lun)
    RTSRoot().save_to_file()
def access_group_map_create(req, pool_name, vol_name, ag_name, h_lun_id=None):
    """Mask a volume to an access group.

    When h_lun_id is None the next free host LUN ID in [0, MAX_LUN] is
    chosen automatically. Returns None silently if the volume is
    already masked to the requested group; raises if the access group
    does not exist or no host LUN ID is free.
    """
    tpg = _get_iscsi_tpg()
    tpg.enable = True
    tpg.set_attribute("authentication", '0')
    set_portal_addresses(tpg)
    tpg_lun = _tpg_lun_of(tpg, pool_name, vol_name)
    # Pre-check: already mapped to the requested access group?
    if any(tpg_lun.mapped_luns):
        for tgt_map in access_group_map_list(req):
            if (tgt_map['ag_name'] == ag_name and
                    tgt_map['pool_name'] == pool_name and
                    tgt_map['vol_name'] == vol_name):
                # Already masked.
                return None
    node_acl_group = NodeACLGroup(tpg, ag_name)
    if not any(node_acl_group.wwns):
        # A non-existent access group means the mapping status would not
        # be stored; treat this as an error instead of silently returning.
        raise TargetdError(TargetdError.NOT_FOUND_ACCESS_GROUP,
                           "Access group not found")
    if h_lun_id is None:
        # Pick the next available host LUN ID (max is MAX_LUN).
        used_ids = set(int(ml.mapped_lun) for ml in tpg_lun.mapped_luns)
        free_h_lun_ids = set(range(MAX_LUN + 1)) - used_ids
        if len(free_h_lun_ids) == 0:
            raise TargetdError(
                TargetdError.NO_FREE_HOST_LUN_ID,
                "All host LUN ID 0 ~ %d is in use" % MAX_LUN)
        h_lun_id = free_h_lun_ids.pop()
    node_acl_group.mapped_lun_group(h_lun_id, tpg_lun)
    RTSRoot().save_to_file()
def initiator_set_auth(req, initiator_wwn, in_user, in_pass, out_user,
                       out_pass):
    """Configure CHAP and mutual-CHAP credentials for an initiator.

    An incomplete credential pair clears both halves; rtslib treats ''
    as its NULL value for these attributes.
    """
    fm = FabricModule('iscsi')
    tpg = TPG(Target(fm, target_name), 1)
    node_acl = NodeACL(tpg, initiator_wwn)
    if not (in_user and in_pass):
        in_user = in_pass = ''
    if not (out_user and out_pass):
        out_user = out_pass = ''
    node_acl.chap_userid = in_user
    node_acl.chap_password = in_pass
    node_acl.chap_mutual_userid = out_user
    node_acl.chap_mutual_password = out_pass
    RTSRoot().save_to_file()
def export_destroy(req, pool, vol, initiator_wwn):
    """Remove the export of volume vol (from pool) to initiator_wwn,
    pruning any target-tree branches left without children.

    Raises NOT_FOUND_VOLUME_EXPORT when the volume is not exported to
    the given initiator.
    """
    mod = pool_module(pool)
    fm = FabricModule('iscsi')
    target = Target(fm, target_name)
    tpg = TPG(target, 1)
    node_acl = NodeACL(tpg, initiator_wwn)
    pool_dev_name = mod.pool2dev_name(pool)
    for mapped_lun in node_acl.mapped_luns:
        # All storage objects are Block, so udev_path is safe to read.
        udev_path = mapped_lun.tpg_lun.storage_object.udev_path
        if not mod.has_udev_path(udev_path):
            continue
        mlun_vg, mlun_name = mod.split_udev_path(udev_path)
        if mlun_vg == pool_dev_name and mlun_name == vol:
            tpg_lun = mapped_lun.tpg_lun
            mapped_lun.delete()
            # Tidy up: drop the TPG LUN and its storage object once
            # nothing maps to them any more.
            if not any(tpg_lun.mapped_luns):
                so = tpg_lun.storage_object
                tpg_lun.delete()
                so.delete()
            break
    else:
        raise TargetdError(TargetdError.NOT_FOUND_VOLUME_EXPORT,
                           "Volume '%s' not found in %s exports" %
                           (vol, initiator_wwn))
    # Clean up the tree if a branch has no leaf.
    if not any(node_acl.mapped_luns):
        node_acl.delete()
        if not any(tpg.node_acls):
            tpg.delete()
            if not any(target.tpgs):
                target.delete()
    RTSRoot().save_to_file()
def export_destroy(req, pool, vol, initiator_wwn):
    """Remove the export of volume vol (from pool) to initiator_wwn.

    Raises error -151 when the volume is not exported to the given
    initiator. Also prunes empty NodeACL/TPG/Target branches.
    """
    pool_check(pool)
    fm = FabricModule('iscsi')
    t = Target(fm, target_name)
    tpg = TPG(t, 1)
    na = NodeACL(tpg, initiator_wwn)
    vg_name, thin_pool = get_vg_lv(pool)
    for mlun in na.mapped_luns:
        # all SOs are Block so we can access udev_path safely
        # NOTE(review): assumes udev_path has exactly four "/"-separated
        # components ("/dev/<vg>/<lv>") so the [2:] slice unpacks into
        # two names — verify for other path shapes.
        mlun_vg, mlun_name = \
            mlun.tpg_lun.storage_object.udev_path.split("/")[2:]
        if mlun_vg == vg_name and mlun_name == vol:
            tpg_lun = mlun.tpg_lun
            mlun.delete()
            # be tidy and delete unused tpg lun mappings?
            if not any(tpg_lun.mapped_luns):
                so = tpg_lun.storage_object
                tpg_lun.delete()
                so.delete()
            break
    else:
        # for/else: no mapped LUN matched, so the export does not exist.
        raise TargetdError(
            -151,
            "Volume '%s' not found in %s exports" % (vol, initiator_wwn))
    # Clean up tree if branch has no leaf
    if not any(na.mapped_luns):
        na.delete()
        if not any(tpg.node_acls):
            tpg.delete()
            if not any(t.tpgs):
                t.delete()
    RTSRoot().save_to_file()
def access_group_create(req, ag_name, init_id, init_type):
    """Create access group ag_name containing initiator init_id
    (iSCSI only).

    Raises on a duplicate group name or an initiator that already has
    an ACL on the TPG.
    """
    if init_type != 'iscsi':
        raise TargetdError(TargetdError.NO_SUPPORT, "Only support iscsi")
    name_check(ag_name)
    tpg = _get_iscsi_tpg()
    # Reject a name that is already taken by another access group.
    for existing_group in tpg.node_acl_groups:
        if existing_group.name == ag_name:
            raise TargetdError(TargetdError.NAME_CONFLICT,
                               "Requested access group name is in use")
    # Reject an initiator that already has an ACL on this TPG.
    if init_id in list(acl.node_wwn for acl in tpg.node_acls):
        raise TargetdError(TargetdError.EXISTS_INITIATOR,
                           "Requested init_id is in use")
    new_group = NodeACLGroup(tpg, ag_name)
    new_group.add_acl(init_id)
    RTSRoot().save_to_file()
class UIRoot(UINode):
    '''
    The targetcli hierarchy root node.
    '''

    def __init__(self, shell, as_root=False):
        UINode.__init__(self, '/', shell=shell)
        self.as_root = as_root
        self.rtsroot = RTSRoot()

    def refresh(self):
        '''
        Refreshes the tree of target fabric modules.
        '''
        self._children = set([])
        UIBackstores(self)
        # only show fabrics present in the system
        for fm in self.rtsroot.fabric_modules:
            if fm.wwns == None or any(fm.wwns):
                UIFabricModule(fm, self)

    def ui_command_saveconfig(self, savefile=default_save_file):
        '''
        Saves the current configuration to a file so that it can be restored
        on next boot.
        '''
        self.assert_root()
        savefile = os.path.expanduser(savefile)
        # Only save backups if saving to default location
        if savefile == default_save_file:
            backup_dir = os.path.dirname(savefile) + "/backup"
            backup_name = "saveconfig-" + \
                datetime.now().strftime("%Y%m%d-%H:%M:%S") + ".json"
            backupfile = backup_dir + "/" + backup_name
            # Best-effort copy: a missing savefile is not an error here.
            with ignored(IOError):
                shutil.copy(savefile, backupfile)
            # Kill excess backups
            backups = sorted(
                glob(os.path.dirname(savefile) + "/backup/*.json"))
            files_to_unlink = list(reversed(backups))[kept_backups:]
            for f in files_to_unlink:
                os.unlink(f)
            self.shell.log.info("Last %d configs saved in %s." % \
                                    (kept_backups, backup_dir))
        self.rtsroot.save_to_file(savefile)
        self.shell.log.info("Configuration saved to %s" % savefile)

    def ui_command_restoreconfig(self, savefile=default_save_file,
                                 clear_existing=False):
        '''
        Restores configuration from a file.
        '''
        self.assert_root()
        savefile = os.path.expanduser(savefile)
        if not os.path.isfile(savefile):
            self.shell.log.info("Restore file %s not found" % savefile)
            return
        errors = self.rtsroot.restore_from_file(savefile, clear_existing)
        self.refresh()
        if errors:
            raise ExecutionError("Configuration restored, %d recoverable errors:\n%s" % \
                                     (len(errors), "\n".join(errors)))
        self.shell.log.info("Configuration restored from %s" % savefile)

    def ui_complete_saveconfig(self, parameters, text, current_param):
        '''
        Auto-completes the file name
        '''
        if current_param != 'savefile':
            return []
        completions = complete_path(text, stat.S_ISREG)
        # A unique, non-directory completion gets a trailing space.
        if len(completions) == 1 and not completions[0].endswith('/'):
            completions = [completions[0] + ' ']
        return completions

    ui_complete_restoreconfig = ui_complete_saveconfig

    def ui_command_clearconfig(self, confirm=None):
        '''
        Removes entire configuration of backstores and targets
        '''
        self.assert_root()
        confirm = self.ui_eval_param(confirm, 'bool', False)
        self.rtsroot.clear_existing(confirm=confirm)
        self.shell.log.info("All configuration cleared")
        self.refresh()

    def ui_command_version(self):
        '''
        Displays the targetcli and support libraries versions.
        '''
        from targetcli import __version__ as targetcli_version
        self.shell.log.info("targetcli version %s" % targetcli_version)

    def ui_command_sessions(self, action="list", sid=None):
        '''
        Displays a detailed list of all open sessions.

        PARAMETERS
        ==========

        I{action}
        ---------
        The I{action} is one of:
            - B{list} gives a short session list
            - B{detail} gives a detailed list

        I{sid}
        ------
        You can specify an I{sid} to only list this one,
        with or without details.

        SEE ALSO
        ========
        status
        '''
        indent_step = 4
        base_steps = 0
        action_list = ("list", "detail")

        if action not in action_list:
            raise ExecutionError("action must be one of: %s" %
                                 ", ".join(action_list))
        if sid is not None:
            try:
                int(sid)
            except ValueError:
                raise ExecutionError("sid must be a number, '%s' given" % sid)

        # Print text indented by the given number of steps.
        def indent_print(text, steps):
            console = self.shell.con
            console.display(console.indent(text, indent_step * steps),
                            no_lf=True)

        # Print one session dict; 'detail' adds ACL, LUN and connections.
        def print_session(session):
            acl = session['parent_nodeacl']
            indent_print("alias: %(alias)s\tsid: %(id)i type: " \
                             "%(type)s session-state: %(state)s" % session,
                         base_steps)

            if action == 'detail':
                if self.as_root:
                    if acl.authenticate_target:
                        auth = " (authenticated)"
                    else:
                        auth = " (NOT AUTHENTICATED)"
                else:
                    auth = ""

                indent_print("name: %s%s" % (acl.node_wwn, auth),
                             base_steps + 1)

                for mlun in acl.mapped_luns:
                    plugin = mlun.tpg_lun.storage_object.plugin
                    name = mlun.tpg_lun.storage_object.name
                    if mlun.write_protect:
                        mode = "r"
                    else:
                        mode = "rw"
                    indent_print(
                        "mapped-lun: %d backstore: %s/%s mode: %s" %
                        (mlun.mapped_lun, plugin, name, mode),
                        base_steps + 1)

                for connection in session['connections']:
                    indent_print("address: %(address)s (%(transport)s) cid: " \
                                     "%(cid)i connection-state: %(cstate)s" % \
                                     connection, base_steps + 1)

        if sid:
            printed_sessions = [
                x for x in self.rtsroot.sessions if x['id'] == int(sid)
            ]
        else:
            printed_sessions = list(self.rtsroot.sessions)

        if len(printed_sessions):
            for session in printed_sessions:
                print_session(session)
        else:
            if sid is None:
                indent_print("(no open sessions)", base_steps)
            else:
                raise ExecutionError("no session found with sid %i" % int(sid))
class UIRoot(UINode):
    '''
    The targetcli hierarchy root node.
    '''

    def __init__(self, shell, as_root=False):
        UINode.__init__(self, '/', shell=shell)
        self.as_root = as_root
        self.rtsroot = RTSRoot()

    def refresh(self):
        '''
        Refreshes the tree of target fabric modules.
        '''
        self._children = set([])
        # Invalidate any rtslib caches
        if 'invalidate_caches' in dir(RTSRoot):
            self.rtsroot.invalidate_caches()
        UIBackstores(self)
        # only show fabrics present in the system
        for fm in self.rtsroot.fabric_modules:
            if fm.wwns == None or any(fm.wwns):
                UIFabricModule(fm, self)

    def _compare_files(self, backupfile, savefile):
        '''
        Compare backfile and saveconfig file
        '''
        # NOTE(review): if opening either file fails, only a warning is
        # logged and the final comparison reads an unassigned local
        # (NameError) — confirm whether failure should return False.
        if (os.path.splitext(backupfile)[1] == '.gz'):
            try:
                with gzip.open(backupfile, 'rb') as fbkp:
                    fdata_bkp = fbkp.read()
            except IOError as e:
                self.shell.log.warning("Could not gzip open backupfile %s: %s"
                                       % (backupfile, e.strerror))
        else:
            try:
                with open(backupfile, 'rb') as fbkp:
                    fdata_bkp = fbkp.read()
            except IOError as e:
                self.shell.log.warning("Could not open backupfile %s: %s"
                                       % (backupfile, e.strerror))
        try:
            with open(savefile, 'rb') as f:
                fdata = f.read()
        except IOError as e:
            self.shell.log.warning("Could not open saveconfig file %s: %s"
                                   % (savefile, e.strerror))

        if fdata_bkp == fdata:
            return True
        else:
            return False

    def _save_backups(self, savefile):
        '''
        Take backup of config-file if needed.
        '''
        # Only save backups if saving to default location
        if savefile != default_save_file:
            return

        backup_dir = os.path.dirname(savefile) + "/backup/"
        backup_name = "saveconfig-" + \
            datetime.now().strftime("%Y%m%d-%H:%M:%S") + "-json.gz"
        backupfile = backup_dir + backup_name
        backup_error = None

        if not os.path.exists(backup_dir):
            try:
                os.makedirs(backup_dir)
            except OSError as exe:
                raise ExecutionError("Cannot create backup directory [%s] %s."
                                     % (backup_dir, exe.strerror))

        # Only save backups if savefile exits
        if not os.path.exists(savefile):
            return

        backed_files_list = sorted(glob(os.path.dirname(savefile) + \
                                        "/backup/saveconfig-*json*"))

        # Save backup if backup dir is empty, or savefile is differnt from recent backup copy
        if not backed_files_list or not self._compare_files(backed_files_list[-1], savefile):
            try:
                # Compress the backup copy with gzip.
                with open(savefile, 'rb') as f_in, gzip.open(backupfile, 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
                    f_out.flush()
            except IOError as ioe:
                backup_error = ioe.strerror or "Unknown error"

            if backup_error == None:
                # remove excess backups
                max_backup_files = int(self.shell.prefs['max_backup_files'])

                # A larger max_backup_files in the universal prefs file
                # overrides the shell preference.
                # NOTE(review): the bare except below swallows any parse
                # error, and the regex is a non-raw string ('\s') —
                # consider r'' and a narrower except.
                try:
                    with open(universal_prefs_file) as prefs:
                        backups = [line for line in prefs.read().splitlines()
                                   if re.match('^max_backup_files\s*=', line)]
                    if max_backup_files < int(backups[0].split('=')[1].strip()):
                        max_backup_files = int(backups[0].split('=')[1].strip())
                except:
                    self.shell.log.debug("No universal prefs file '%s'."
                                         % universal_prefs_file)

                files_to_unlink = list(reversed(backed_files_list))[max_backup_files - 1:]
                for f in files_to_unlink:
                    with ignored(IOError):
                        os.unlink(f)

                self.shell.log.info("Last %d configs saved in %s."
                                    % (max_backup_files, backup_dir))
            else:
                self.shell.log.warning("Could not create backup file %s: %s."
                                       % (backupfile, backup_error))

    def ui_command_saveconfig(self, savefile=default_save_file):
        '''
        Saves the current configuration to a file so that it can be restored
        on next boot.
        '''
        self.assert_root()

        if not savefile:
            savefile = default_save_file

        savefile = os.path.expanduser(savefile)
        self._save_backups(savefile)
        self.rtsroot.save_to_file(savefile)
        self.shell.log.info("Configuration saved to %s" % savefile)

    def ui_command_restoreconfig(self, savefile=default_save_file,
                                 clear_existing=False):
        '''
        Restores configuration from a file.
        '''
        self.assert_root()
        savefile = os.path.expanduser(savefile)
        if not os.path.isfile(savefile):
            self.shell.log.info("Restore file %s not found" % savefile)
            return
        errors = self.rtsroot.restore_from_file(savefile, clear_existing)
        self.refresh()
        if errors:
            raise ExecutionError("Configuration restored, %d recoverable errors:\n%s" % \
                                     (len(errors), "\n".join(errors)))
        self.shell.log.info("Configuration restored from %s" % savefile)

    def ui_complete_saveconfig(self, parameters, text, current_param):
        '''
        Auto-completes the file name
        '''
        if current_param != 'savefile':
            return []
        completions = complete_path(text, stat.S_ISREG)
        # A unique, non-directory completion gets a trailing space.
        if len(completions) == 1 and not completions[0].endswith('/'):
            completions = [completions[0] + ' ']
        return completions

    ui_complete_restoreconfig = ui_complete_saveconfig

    def ui_command_clearconfig(self, confirm=None):
        '''
        Removes entire configuration of backstores and targets
        '''
        self.assert_root()
        confirm = self.ui_eval_param(confirm, 'bool', False)
        self.rtsroot.clear_existing(confirm=confirm)
        self.shell.log.info("All configuration cleared")
        self.refresh()

    def ui_command_version(self):
        '''
        Displays the targetcli and support libraries versions.
        '''
        from targetcli import __version__ as targetcli_version
        self.shell.log.info("targetcli version %s" % targetcli_version)

    def ui_command_sessions(self, action="list", sid=None):
        '''
        Displays a detailed list of all open sessions.

        PARAMETERS
        ==========

        I{action}
        ---------
        The I{action} is one of:
            - B{list} gives a short session list
            - B{detail} gives a detailed list

        I{sid}
        ------
        You can specify an I{sid} to only list this one,
        with or without details.

        SEE ALSO
        ========
        status
        '''
        indent_step = 4
        base_steps = 0
        action_list = ("list", "detail")

        if action not in action_list:
            raise ExecutionError("action must be one of: %s" %
                                 ", ".join(action_list))
        if sid is not None:
            try:
                int(sid)
            except ValueError:
                raise ExecutionError("sid must be a number, '%s' given" % sid)

        # Print text indented by the given number of steps.
        def indent_print(text, steps):
            console = self.shell.con
            console.display(console.indent(text, indent_step * steps),
                            no_lf=True)

        # Print one session dict; 'detail' adds ACL, LUN and connections.
        def print_session(session):
            acl = session['parent_nodeacl']
            indent_print("alias: %(alias)s\tsid: %(id)i type: " \
                             "%(type)s session-state: %(state)s" % session,
                         base_steps)

            if action == 'detail':
                if self.as_root:
                    if acl.authenticate_target:
                        auth = " (authenticated)"
                    else:
                        auth = " (NOT AUTHENTICATED)"
                else:
                    auth = ""

                indent_print("name: %s%s" % (acl.node_wwn, auth),
                             base_steps + 1)

                for mlun in acl.mapped_luns:
                    plugin = mlun.tpg_lun.storage_object.plugin
                    name = mlun.tpg_lun.storage_object.name
                    if mlun.write_protect:
                        mode = "r"
                    else:
                        mode = "rw"
                    indent_print("mapped-lun: %d backstore: %s/%s mode: %s" %
                                 (mlun.mapped_lun, plugin, name, mode),
                                 base_steps + 1)

                for connection in session['connections']:
                    indent_print("address: %(address)s (%(transport)s) cid: " \
                                     "%(cid)i connection-state: %(cstate)s" % \
                                     connection, base_steps + 1)

        if sid:
            printed_sessions = [x for x in self.rtsroot.sessions
                                if x['id'] == int(sid)]
        else:
            printed_sessions = list(self.rtsroot.sessions)

        if len(printed_sessions):
            for session in printed_sessions:
                print_session(session)
        else:
            if sid is None:
                indent_print("(no open sessions)", base_steps)
            else:
                raise ExecutionError("no session found with sid %i" % int(sid))
class TargetManager:
    'Manages ZVOL based iSCSI targets for Emulab diskless booting'

    def __init__(self):
        # Caches mirroring the live rtslib state; see get_targets() for
        # the layout of self.target.
        self.block_store = {}
        self.target = {}
        self.root = RTSRoot()
        self.iscsi = FabricModule('iscsi')
        self.mapped_luns = {}
        self.get_block_store_objects()
        self.get_targets()

    def save(self):
        '''Save the current configuration'''
        self.root.save_to_file()

    # Get list of block storage objects, keyed by name.
    def get_block_store_objects(self):
        self.block_store = {}
        for storage_object in self.root.storage_objects:
            if storage_object.plugin == "block":
                self.block_store[storage_object.name] = storage_object

    # Build a nested lookup structure of iSCSI targets and their
    # associated LUNs, ACLs and portals, mirroring targetcli's "ls"
    # output for fast lookup when creating shares for many nodes:
    #
    #   self.target[wwn] = {'target': Target, 'tpg': {...}}
    #   self.target[wwn]['tpg'][tag] = {
    #       'tpg':    TPG,
    #       'acl':    {initiator_name: NodeACL},
    #       'lun':    {storage_object_name: LUN},
    #       'portal': {"ip:port": NetworkPortal},
    #   }
    #
    # Each target is uniquely identified by its wwn (World Wide Name),
    # also known as the initiator name. The client learns this name
    # from its kernel parameters, the initiator name stored in the
    # BIOS, but usually from /etc/iscsi/initiatorname.iscsi.
    # This code assumes only one TPG per target.
    def get_targets(self):
        for target in list(self.iscsi.targets):
            wwn = target.wwn
            self.target[wwn] = {'target': target, 'tpg': {}}
            for tpg in target.tpgs:
                self.target[wwn]['tpg'][tpg.tag] = {
                    'tpg': tpg,
                    'acl': {},
                    'lun': {},
                    'portal': {}
                }
                tpg_tag = self.target[wwn]['tpg'][tpg.tag]
                for acl in tpg.node_acls:
                    tpg_tag['acl'][acl.node_wwn] = acl
                for lun in tpg.luns:
                    tpg_tag['lun'][lun.storage_object.name] = lun
                for portal in tpg.network_portals:
                    portal_id = portal.ip_address + ":" + str(portal.port)
                    tpg_tag['portal'][portal_id] = portal

    # Create a share
    def create_iscsi_target(self, params):
        """Create an iSCSI target

        Parameters
        ----------
        params : dict
            Dictionary of parameters
            wwn: The World Wide Name of the share, eg, the IQN
            device: the backing device
            initiators: list of initiators

        Returns
        -------
        True if any object had to be created; None when the target
        already existed in full (note: None, not False).
        """
        wwn = params.get('wwn', None)
        device = params.get('device', None)
        # NOTE(review): a missing 'initiators' key leaves this None and
        # the for-loop below would raise TypeError — confirm callers
        # always supply it.
        initiators = params.get('initiators', None)
        ip = params.get('ip', '0.0.0.0')
        port = params.get('port', 3260)

        # Something outside this library lowercase the wwn, so
        # we lowercase the input to stay consistent
        if wwn is not None:
            wwn = wwn.lower()

        # If at any step, something needs to be created, then True is
        # returned to the caller to show that this iscsi target needed
        # to be created. It is possible to call this method for an
        # existing iscsi target, in which case this method does nothing.
        # By tracking this behavior, the caller can be informed whether
        # or not any action was taken.
        created = None

        # Create blockstore, if needed
        blockstore = self.get_block_store(wwn)
        if blockstore is None:
            blockstore = self.create_block_store(wwn, device)
            created = True
        else:
            Log.info('block backstore %s already exists, not creating'
                     % (wwn))

        # Create target
        target = self.get_target(wwn)
        if target is None:
            target = self.create_target(wwn)
            created = True
        else:
            Log.info('target %s already exists, not creating' % (wwn))

        # Create TPG
        tag = 1
        tpg = self.get_tpg(target, tag)
        if tpg is None:
            tpg = self.create_tpg(target, tag)
            created = True
        else:
            Log.info('tpg (%s, %s) already exists, not creating'
                     % (target, tag))

        # Create LUN
        # First, check to see if there are any LUNs. More than one LUN is not
        # supported, so we just iterate over all (eg, the one) lun and set it.
        # If there's more than one LUN, then the last one will be the LUN that
        # is used, which may result in undefined behavior
        lun = None
        for lun in tpg.luns:
            pass
        if lun is None:
            lun = self.create_lun(tpg, blockstore)
            created = True
        else:
            Log.info('lun %s already exists, not creating'
                     % (blockstore.name))

        # Create portal
        portal = self.get_portal(tpg, ip, port)
        if portal is None:
            portal = self.create_portal(tpg, ip, port)
            created = True
        else:
            portal_id = self.get_portal_id(ip, port)
            Log.info('portal %s already exists, not creating' % (portal_id))

        # Set up ACLs and mapped LUNs
        for initiator in initiators:
            # Create ACL
            acl = self.get_acl(tpg, initiator)
            if acl is None:
                acl = self.create_acl(tpg, initiator)
                created = True
            else:
                Log.info('acl (%s, %s) already exists, not creating'
                         % (tpg, initiator))

            # Map LUN
            num = 0
            # Like with LUNs, only one mapped lun is supported. Check for
            # a mapped lun by iterating over the entire set of mapped luns,
            # use the last one in the list, if any exist.
            # If things are working properly, there should be only one.
            mapped_lun = None
            for mapped_lun in acl.mapped_luns:
                pass
            if mapped_lun is None:
                mapped_lun = self.create_mapped_lun(acl, num, lun)
                created = True
            else:
                Log.info('mapped lun (%s, %s, %s) already exists'
                         % (acl, num, lun))

        return created

    def delete_target_and_block_store(self, params):
        """Delete an iSCSI target and block store. This does not delete
        the underlying storage

        Parameters
        ----------
        target_wwn : string
            The world wide name of the share to remove
        """
        wwn = params.get('wwn', None)
        if wwn is None:
            raise ValueError('No wwn specified')

        # Delete target
        self.delete_target(wwn)

        # Delete blockstore
        self.delete_block_store(wwn)

    def get_block_store(self, wwn):
        """Get an existing block store, if it exists

        Parameters
        ----------
        wwn : string
            World Wide Name for the block store
        device : string
            Path to a block device

        Returns:
        --------
        If the block store exists, then that object is returned.
        Otherwise, None is returned
        """
        return self.block_store.get(wwn, None)

    def create_block_store(self, wwn, device):
        """Create a blockstore with the given wwn. It is assumed that the
        blockstore does not already exists. Calling this method when the
        storage already exists can potentially result in an exception
        being thrown. Call get_block_store first to check for existence.

        Parameters
        ----------
        wwn : string
            World Wide Name for the block store
        device : string
            Path to a block device

        Returns:
        --------
        blockstore object, if it was successfully created
        """
        Log.info('creating block backstore %s for device %s' % (wwn, device))
        storage = BlockStorageObject(wwn, device, wwn)
        self.block_store[wwn] = storage
        return storage

    # Delete blockstore, if it exists
    def delete_block_store(self, name):
        store = self.block_store.get(name)
        # If blockstore doesn't exist, do not proceed
        if store is None:
            Log.info('No block store %s. Not deleting' % name)
            return
        Log.info('deleting block store %s' % (name))
        # Delete the block store. The backing device, file, etc,
        # still exists.
        store.delete()
        del self.block_store[name]

    # Delete target, if it exists
    def delete_target(self, wwn):
        # See if the target exists
        target_dict = self.target.get(wwn, None)
        # Doesn't exist, don't proceed
        if target_dict is None:
            Log.info('No target %s. Not deleting' % wwn)
            return
        target = target_dict.get('target', None)
        # Surprising, but possible, because processes can die
        # and the state can strange
        if target is None:
            return
        Log.info('deleting target %s' % (wwn))
        # Delete the target
        target.delete()
        del self.target[wwn]

    def get_target(self, wwn):
        '''Get an existing target object for the wwn

        Parameters
        ----------
        wwn : string
            The wwn of the target

        Returns
        -------
        The target object if it exists, None otherwise
        '''
        target_dict = self.target.get(wwn, None)
        target = None
        if target_dict is not None:
            target = target_dict['target']
        return target

    # Create target, if needed
    def create_target(self, wwn):
        target_dict = self.target.get(wwn, None)
        target = None
        if target_dict is None:
            Log.info('creating target with wwn %s' % (wwn))
            # The wwn will be lowercased automatically by something
            # outside this library. I'm not sure if its RTSLib or
            # the underlying Linux target system.
            target = Target(self.iscsi, wwn)
            # Add target to data structure, initialize empty child nodes
            self.target[wwn] = {'target': target, 'tpg': {}}
        else:
            Log.info('target %s already exists, not creating' % (wwn))
            target = target_dict['target']
        return target

    def get_tpg(self, target, tag):
        '''Get a target portal group

        Parameters
        ----------
        target: Target
            The target
        tag: Tag
            The tag

        Returns
        -------
        The target portal group, if it exists, None otherwise
        '''
        tpg_list = self.target[target.wwn]['tpg']
        tpg_list_tag = tpg_list.get(tag, None)
        tpg = None
        if tpg_list_tag is not None:
            tpg = tpg_list[tag]['tpg']
        return tpg

    # Create TPG, if needed
    def create_tpg(self, target, tag):
        tpg_list = self.target[target.wwn]['tpg']
        tpg_list_tag = tpg_list.get(tag, None)
        if tpg_list_tag is None:
            Log.info('creating tpg (%s, %s)' % (target, tag))
            # Create and configure the target portal group
            tpg = TPG(target, tag)
            tpg.set_attribute("authentication", 0)
            tpg.enable = 1
            # Set up the list of TPGs for this target.
            # NOTE(review): the 'acl' entry here seeds a stray
            # {'mapped_lun': {}} key, unlike get_targets() which keys
            # 'acl' by initiator name — confirm this is intentional.
            tpg_list[tag] = {
                'tpg': tpg,
                'acl': {'mapped_lun': {}},
                'lun': {},
                'portal': {}
            }
        else:
            Log.info('tpg (%s, %s) already exists, not creating'
                     % (target, tag))
            tpg = tpg_list[tag]['tpg']
        return tpg

    # Create LUN, if needed
    def create_lun(self, tpg, blockstore):
        wwn = tpg.parent_target.wwn
        lun_list = self.target[wwn]['tpg'][tpg.tag]['lun']
        lun = lun_list.get(blockstore.name, None)
        if lun is None:
            Log.info('creating lun %s, blockstore %s' % (tpg, blockstore))
            # Create the LUN
            lun = LUN(tpg, 0, blockstore)
            # Add it to the local data structure for tracking LUNs
            lun_list[blockstore.name] = lun
        else:
            # LUN already exists
            Log.info('lun %s already exists, not creating'
                     % (blockstore.name))
        return lun

    # Build the "ip:port" key used for the portal cache.
    def get_portal_id(self, ip, port):
        return '%s:%d' % (ip, port)

    def get_portal(self, tpg, ip, port):
        # Look up a cached NetworkPortal for this TPG; None if absent.
        portal = None
        portal_id = self.get_portal_id(ip, port)
        wwn = tpg.parent_target.wwn
        portal_list = self.target[wwn]['tpg'][tpg.tag]['portal']
        return portal_list.get(portal_id, None)

    # Create portal, if needed
    def create_portal(self, tpg, ip, port):
        portal = None
        portal_id = self.get_portal_id(ip, port)
        wwn = tpg.parent_target.wwn
        portal_list = self.target[wwn]['tpg'][tpg.tag]['portal']
        if portal_id in portal_list:
            Log.info('portal %s already exists, not creating' % (portal_id))
            portal = portal_list[portal_id]
        else:
            Log.info('creating portal (%s, %s, %s)' % (tpg, ip, port))
            portal = NetworkPortal(tpg, ip, port)
            portal_list[portal_id] = portal
        return portal

    def get_acl(self, tpg, initiator_name):
        # Look up a cached NodeACL for this TPG; None if absent.
        acl = None
        wwn = tpg.parent_target.wwn
        acl_list = self.target[wwn]['tpg'][tpg.tag]['acl']
        return acl_list.get(initiator_name, None)

    # Create ACL, if needed
    def create_acl(self, tpg, initiator_name):
        acl = None
        wwn = tpg.parent_target.wwn
        acl_list = self.target[wwn]['tpg'][tpg.tag]['acl']
        if initiator_name in acl_list:
            Log.info('acl (%s, %s) already exists, not creating'
                     % (tpg, initiator_name))
            acl = acl_list[initiator_name]
        else:
            Log.info('creating acl (%s, %s)' % (tpg, initiator_name))
            acl = NodeACL(tpg, initiator_name)
            acl_list[initiator_name] = acl
        return acl

    # Create mapped lun, if needed. Returns None when the ACL already
    # has any mapped LUN.
    def create_mapped_lun(self, acl, num, lun):
        mapped_lun = None
        if not list(acl.mapped_luns):
            Log.info('creating mapped lun (%s, %s, %s)' % (acl, num, lun))
            mapped_lun = MappedLUN(acl, num, lun)
        else:
            Log.info('mapped lun (%s, %s, %s) already exists'
                     % (acl, num, lun))
        return mapped_lun
def access_group_destroy(req, ag_name):
    """Delete access group ag_name and persist the configuration."""
    tpg = _get_iscsi_tpg()
    group = NodeACLGroup(tpg, ag_name)
    group.delete()
    RTSRoot().save_to_file()
class UIRoot(UINode):
    """
    The targetcli hierarchy root node.
    """

    def __init__(self, shell, as_root=False):
        """
        shell: the ConfigShell driving the CLI.
        as_root: True when running privileged; gates detail output in
            ui_command_sessions and (via assert_root) mutating commands.
        """
        UINode.__init__(self, "/", shell=shell)
        self.as_root = as_root
        # Cached rtslib root; all fabric/config queries go through it.
        self.rtsroot = RTSRoot()

    def refresh(self):
        """
        Refreshes the tree of target fabric modules.
        """
        self._children = set([])
        UIBackstores(self)
        # only show fabrics present in the system
        for fm in self.rtsroot.fabric_modules:
            # fm.wwns may legitimately be None (fabric accepts any wwn),
            # so identity comparison, not equality (was: fm.wwns == None).
            if fm.wwns is None or any(fm.wwns):
                UIFabricModule(fm, self)

    def ui_command_saveconfig(self, savefile=default_save_file):
        """
        Saves the current configuration to a file so that it can be restored
        on next boot.
        """
        self.assert_root()
        savefile = os.path.expanduser(savefile)

        # Only save backups if saving to default location
        if savefile == default_save_file:
            backup_dir = os.path.dirname(savefile) + "/backup"
            backup_name = "saveconfig-" + \
                datetime.now().strftime("%Y%m%d-%H:%M:%S") + ".json"
            backupfile = backup_dir + "/" + backup_name
            # Best-effort copy of the previous config; ignore I/O failures
            # (e.g. no previous savefile yet).
            with ignored(IOError):
                shutil.copy(savefile, backupfile)

            # Kill excess backups, keeping only the newest kept_backups.
            backups = sorted(glob(os.path.dirname(savefile) + "/backup/*.json"))
            files_to_unlink = list(reversed(backups))[kept_backups:]
            for f in files_to_unlink:
                os.unlink(f)

            self.shell.log.info("Last %d configs saved in %s." %
                                (kept_backups, backup_dir))

        self.rtsroot.save_to_file(savefile)
        self.shell.log.info("Configuration saved to %s" % savefile)

    def ui_command_restoreconfig(self, savefile=default_save_file,
                                 clear_existing=False):
        """
        Restores configuration from a file.
        """
        self.assert_root()
        savefile = os.path.expanduser(savefile)

        if not os.path.isfile(savefile):
            self.shell.log.info("Restore file %s not found" % savefile)
            return

        errors = self.rtsroot.restore_from_file(savefile, clear_existing)
        # Rebuild the UI tree to reflect the restored configuration.
        self.refresh()

        if errors:
            raise ExecutionError(
                "Configuration restored, %d recoverable errors:\n%s"
                % (len(errors), "\n".join(errors))
            )

        self.shell.log.info("Configuration restored from %s" % savefile)

    def ui_complete_saveconfig(self, parameters, text, current_param):
        """
        Auto-completes the file name
        """
        if current_param != "savefile":
            return []
        completions = complete_path(text, stat.S_ISREG)
        # Single completion that is not a directory: append a space so the
        # shell treats the argument as finished.
        if len(completions) == 1 and not completions[0].endswith("/"):
            completions = [completions[0] + " "]
        return completions

    ui_complete_restoreconfig = ui_complete_saveconfig

    def ui_command_clearconfig(self, confirm=None):
        """
        Removes entire configuration of backstores and targets
        """
        self.assert_root()
        confirm = self.ui_eval_param(confirm, "bool", False)
        self.rtsroot.clear_existing(confirm=confirm)
        self.shell.log.info("All configuration cleared")
        self.refresh()

    def ui_command_version(self):
        """
        Displays the targetcli and support libraries versions.
        """
        from targetcli import __version__ as targetcli_version
        self.shell.log.info("targetcli version %s" % targetcli_version)

    def ui_command_sessions(self, action="list", sid=None):
        """
        Displays a detailed list of all open sessions.

        PARAMETERS
        ==========

        I{action}
        ---------
        The I{action} is one of:
            - B{list} gives a short session list
            - B{detail} gives a detailed list

        I{sid}
        ------
        You can specify an I{sid} to only list this one, with or without
        details.

        SEE ALSO
        ========
        status
        """
        indent_step = 4
        base_steps = 0
        action_list = ("list", "detail")

        if action not in action_list:
            raise ExecutionError("action must be one of: %s" %
                                 ", ".join(action_list))

        if sid is not None:
            try:
                int(sid)
            except ValueError:
                raise ExecutionError("sid must be a number, '%s' given" % sid)

        def indent_print(text, steps):
            # Render one line at the requested indent depth.
            console = self.shell.con
            console.display(console.indent(text, indent_step * steps),
                            no_lf=True)

        def print_session(session):
            acl = session["parent_nodeacl"]
            indent_print(
                "alias: %(alias)s\tsid: %(id)i type: "
                "%(type)s session-state: %(state)s" % session, base_steps
            )

            if action == "detail":
                # Authentication state is only shown to root, since reading
                # it requires privileged access to the ACL.
                if self.as_root:
                    if acl.authenticate_target:
                        auth = " (authenticated)"
                    else:
                        auth = " (NOT AUTHENTICATED)"
                else:
                    auth = ""

                indent_print("name: %s%s" % (acl.node_wwn, auth),
                             base_steps + 1)

                for mlun in acl.mapped_luns:
                    plugin = mlun.tpg_lun.storage_object.plugin
                    name = mlun.tpg_lun.storage_object.name
                    if mlun.write_protect:
                        mode = "r"
                    else:
                        mode = "rw"
                    indent_print(
                        "mapped-lun: %d backstore: %s/%s mode: %s"
                        % (mlun.mapped_lun, plugin, name, mode),
                        base_steps + 1,
                    )

                for connection in session["connections"]:
                    indent_print(
                        "address: %(address)s (%(transport)s) cid: "
                        "%(cid)i connection-state: %(cstate)s" % connection,
                        base_steps + 1,
                    )

        if sid:
            printed_sessions = [x for x in self.rtsroot.sessions
                                if x["id"] == int(sid)]
        else:
            printed_sessions = list(self.rtsroot.sessions)

        if printed_sessions:
            for session in printed_sessions:
                print_session(session)
        else:
            if sid is None:
                indent_print("(no open sessions)", base_steps)
            else:
                raise ExecutionError("no session found with sid %i" % int(sid))
def __init__(self, shell, as_root=False):
    """
    Initialize the root UI node.

    shell: the ConfigShell instance driving the CLI.
    as_root: True when the shell runs with root privileges.
    """
    UINode.__init__(self, "/", shell=shell)
    self.as_root = as_root
    # Cached rtslib root object used for all target/config queries.
    self.rtsroot = RTSRoot()
class TargetManager:
    'Manages ZVOL based iSCSI targets for Emulab diskless booting'

    # Constructor
    def __init__(self):
        # name -> BlockStorageObject
        self.block_store = {}
        # wwn -> {'target': Target, 'tpg': {tag: {'tpg','acl','lun','portal'}}}
        self.target = {}
        self.root = RTSRoot()
        self.iscsi = FabricModule('iscsi')
        self.mapped_luns = {}
        # Populate the caches from the live kernel target configuration.
        self.get_block_store_objects()
        self.get_targets()

    def save(self):
        '''Save the current configuration'''
        self.root.save_to_file()

    # Get list of block storage objects
    def get_block_store_objects(self):
        self.block_store = {}
        for storage_object in self.root.storage_objects:
            if storage_object.plugin == "block":
                self.block_store[storage_object.name] = storage_object

    # Build a nested-dict snapshot of the live iSCSI configuration for fast
    # lookup when creating shares for lots of nodes. Mirrors targetcli's
    # `ls` layout:
    #
    #   self.target[wwn] = {'target': Target, 'tpg': {}}
    #   self.target[wwn]['tpg'][tpg.tag] = {
    #       'tpg':    TPG object,
    #       'acl':    {initiator_name: NodeACL},   # who may log in
    #       'lun':    {storage_object.name: LUN},
    #       'portal': {'ip:port': NetworkPortal},
    #   }
    #
    # Each target is uniquely identified by its wwn (World Wide Name), which
    # the client learns from kernel parameters, the BIOS, or (usually)
    # /etc/iscsi/initiatorname.iscsi.
    #
    # NOTE(review): unlike get_block_store_objects, this does not clear
    # self.target first, so stale entries survive a re-scan — confirm
    # whether callers rely on that.
    def get_targets(self):
        for target in list(self.iscsi.targets):
            wwn = target.wwn
            self.target[wwn] = {'target': target, 'tpg': {}}
            for tpg in target.tpgs:
                self.target[wwn]['tpg'][tpg.tag] = {
                    'tpg': tpg,
                    'acl': {},
                    'lun': {},
                    'portal': {}
                }
                tpg_tag = self.target[wwn]['tpg'][tpg.tag]
                for acl in tpg.node_acls:
                    tpg_tag['acl'][acl.node_wwn] = acl
                for lun in tpg.luns:
                    tpg_tag['lun'][lun.storage_object.name] = lun
                for portal in tpg.network_portals:
                    portal_id = portal.ip_address + ":" + str(portal.port)
                    tpg_tag['portal'][portal_id] = portal

    # Create a share
    def create_iscsi_target(self, params):
        """Create an iSCSI target

        Parameters
        ----------
        params : dict
            Dictionary of parameters
            wwn: The World Wide Name of the share, eg, the IQN
            device: the backing device
            initiators: list of initiators
            ip/port: portal address (defaults 0.0.0.0:3260)

        Returns
        -------
        True if anything had to be created, None if the target already
        existed in full (the method is idempotent).
        """
        wwn = params.get('wwn', None)
        device = params.get('device', None)
        initiators = params.get('initiators', None)
        ip = params.get('ip', '0.0.0.0')
        port = params.get('port', 3260)

        # Something outside this library lowercases the wwn, so we lowercase
        # the input to stay consistent.
        if wwn is not None:
            wwn = wwn.lower()

        # Set to True if any step had to create something; stays None when
        # the whole target already existed, so the caller can tell whether
        # any action was taken.
        created = None

        # Create blockstore, if needed
        blockstore = self.get_block_store(wwn)
        if blockstore is None:
            blockstore = self.create_block_store(wwn, device)
            created = True
        else:
            Log.info('block backstore %s already exists, not creating' % (wwn))

        # Create target
        target = self.get_target(wwn)
        if target is None:
            target = self.create_target(wwn)
            created = True
        else:
            Log.info('target %s already exists, not creating' % (wwn))

        # Create TPG (this code assumes a single TPG with tag 1)
        tag = 1
        tpg = self.get_tpg(target, tag)
        if tpg is None:
            tpg = self.create_tpg(target, tag)
            created = True
        else:
            Log.info('tpg (%s, %s) already exists, not creating' % (target, tag))

        # Create LUN. Only one LUN is supported, so iterate over all (eg,
        # the one) LUNs and keep the last; with more than one LUN behavior
        # is undefined.
        lun = None
        for lun in tpg.luns:
            pass
        if lun is None:
            lun = self.create_lun(tpg, blockstore)
            created = True
        else:
            Log.info('lun %s already exists, not creating' % (blockstore.name))

        # Create portal
        portal = self.get_portal(tpg, ip, port)
        if portal is None:
            portal = self.create_portal(tpg, ip, port)
            created = True
        else:
            portal_id = self.get_portal_id(ip, port)
            Log.info('portal %s already exists, not creating' % (portal_id))

        # Set up ACLs and mapped LUNs
        for initiator in initiators:
            # Create ACL
            acl = self.get_acl(tpg, initiator)
            if acl is None:
                acl = self.create_acl(tpg, initiator)
                created = True
            else:
                Log.info('acl (%s, %s) already exists, not creating' %
                         (tpg, initiator))

            # Map LUN. Like LUNs, only one mapped lun is supported: iterate
            # the whole set and use the last one, if any exist.
            num = 0
            mapped_lun = None
            for mapped_lun in acl.mapped_luns:
                pass
            if mapped_lun is None:
                mapped_lun = self.create_mapped_lun(acl, num, lun)
                created = True
            else:
                Log.info('mapped lun (%s, %s, %s) already exists' %
                         (acl, num, lun))

        return created

    def delete_target_and_block_store(self, params):
        """Delete an iSCSI target and block store. This does not delete the
        underlying storage

        Parameters
        ----------
        params : dict
            wwn : string, the world wide name of the share to remove

        Raises
        ------
        ValueError if no wwn was supplied.
        """
        wwn = params.get('wwn', None)
        if wwn is None:
            raise ValueError('No wwn specified')
        # Delete target
        self.delete_target(wwn)
        # Delete blockstore
        self.delete_block_store(wwn)

    def get_block_store(self, wwn):
        """Get an existing block store, if it exists

        Parameters
        ----------
        wwn : string
            World Wide Name for the block store

        Returns:
        --------
        The block store object if it exists, None otherwise.
        """
        return self.block_store.get(wwn, None)

    def create_block_store(self, wwn, device):
        """Create a blockstore with the given wwn.

        It is assumed that the blockstore does not already exist; calling
        this when it does can raise. Call get_block_store first to check
        for existence.

        Parameters
        ----------
        wwn : string
            World Wide Name for the block store
        device : string
            Path to a block device

        Returns:
        --------
        The blockstore object, if it was successfully created.
        """
        Log.info('creating block backstore %s for device %s' % (wwn, device))
        storage = BlockStorageObject(wwn, device, wwn)
        self.block_store[wwn] = storage
        return storage

    # Delete blockstore, if it exists
    def delete_block_store(self, name):
        store = self.block_store.get(name)
        # If blockstore doesn't exist, do not proceed
        if store is None:
            Log.info('No block store %s. Not deleting' % name)
            return
        Log.info('deleting block store %s' % (name))
        # Delete the block store. The backing device, file, etc, still exists
        store.delete()
        del self.block_store[name]

    # Delete target, if it exists
    def delete_target(self, wwn):
        # See if the target exists
        target_dict = self.target.get(wwn, None)
        # Doesn't exist, don't proceed
        if target_dict is None:
            Log.info('No target %s. Not deleting' % wwn)
            return
        target = target_dict.get('target', None)
        # Surprising, but possible, because processes can die and the state
        # can be strange.
        if target is None:
            return
        Log.info('deleting target %s' % (wwn))
        # Delete the target
        target.delete()
        del self.target[wwn]

    def get_target(self, wwn):
        '''Get an existing target object for the wwn

        Parameters
        ----------
        wwn : string
            The wwn of the target

        Returns
        -------
        The target object if it exists, None otherwise
        '''
        target_dict = self.target.get(wwn, None)
        target = None
        if target_dict is not None:
            target = target_dict['target']
        return target

    # Create target, if needed
    def create_target(self, wwn):
        target_dict = self.target.get(wwn, None)
        target = None
        if target_dict is None:
            Log.info('creating target with wwn %s' % (wwn))
            # The wwn will be lowercased automatically by something outside
            # this library. I'm not sure if its RTSLib or the underlying
            # Linux target system.
            target = Target(self.iscsi, wwn)
            # Add target to data structure, initialize empty child nodes
            self.target[wwn] = {'target': target, 'tpg': {}}
        else:
            Log.info('target %s already exists, not creating' % (wwn))
            target = target_dict['target']
        return target

    def get_tpg(self, target, tag):
        '''Get a target portal group

        Parameters
        ----------
        target: Target
            The target
        tag: Tag
            The tag

        Returns
        -------
        The target portal group, if it exists, None otherwise
        '''
        tpg_list = self.target[target.wwn]['tpg']
        tpg_list_tag = tpg_list.get(tag, None)
        tpg = None
        if tpg_list_tag is not None:
            tpg = tpg_list[tag]['tpg']
        return tpg

    # Create TPG, if needed
    def create_tpg(self, target, tag):
        tpg_list = self.target[target.wwn]['tpg']
        tpg_list_tag = tpg_list.get(tag, None)
        if tpg_list_tag is None:
            Log.info('creating tpg (%s, %s)' % (target, tag))
            # Create and configure the target portal group
            tpg = TPG(target, tag)
            tpg.set_attribute("authentication", 0)
            tpg.enable = 1
            # Set up the list of TPGs for this target. 'acl' is a plain
            # initiator_name -> NodeACL map, matching get_targets()
            # (previously it was seeded with a stray, never-read
            # {'mapped_lun': {}} entry).
            tpg_list[tag] = {
                'tpg': tpg,
                'acl': {},
                'lun': {},
                'portal': {}
            }
        else:
            Log.info('tpg (%s, %s) already exists, not creating' % (target, tag))
            tpg = tpg_list[tag]['tpg']
        return tpg

    # Create LUN, if needed
    def create_lun(self, tpg, blockstore):
        wwn = tpg.parent_target.wwn
        lun_list = self.target[wwn]['tpg'][tpg.tag]['lun']
        lun = lun_list.get(blockstore.name, None)
        if lun is None:
            # (log previously mislabeled the tpg as the lun)
            Log.info('creating lun for tpg %s, blockstore %s' % (tpg, blockstore))
            # Create the LUN
            lun = LUN(tpg, 0, blockstore)
            # Add it to the local data structure for tracking LUNs
            lun_list[blockstore.name] = lun
        else:
            # LUN already exists
            Log.info('lun %s already exists, not creating' % (blockstore.name))
        return lun

    def get_portal_id(self, ip, port):
        # Canonical "ip:port" key used by the portal cache.
        return '%s:%d' % (ip, port)

    def get_portal(self, tpg, ip, port):
        portal = None
        portal_id = self.get_portal_id(ip, port)
        wwn = tpg.parent_target.wwn
        portal_list = self.target[wwn]['tpg'][tpg.tag]['portal']
        return portal_list.get(portal_id, None)

    # Create portal, if needed
    def create_portal(self, tpg, ip, port):
        portal = None
        portal_id = self.get_portal_id(ip, port)
        wwn = tpg.parent_target.wwn
        portal_list = self.target[wwn]['tpg'][tpg.tag]['portal']
        if portal_id in portal_list:
            Log.info('portal %s already exists, not creating' % (portal_id))
            portal = portal_list[portal_id]
        else:
            Log.info('creating portal (%s, %s, %s)' % (tpg, ip, port))
            portal = NetworkPortal(tpg, ip, port)
            portal_list[portal_id] = portal
        return portal

    def get_acl(self, tpg, initiator_name):
        acl = None
        wwn = tpg.parent_target.wwn
        acl_list = self.target[wwn]['tpg'][tpg.tag]['acl']
        return acl_list.get(initiator_name, None)

    # Create ACL, if needed
    def create_acl(self, tpg, initiator_name):
        acl = None
        wwn = tpg.parent_target.wwn
        acl_list = self.target[wwn]['tpg'][tpg.tag]['acl']
        if initiator_name in acl_list:
            Log.info('acl (%s, %s) already exists, not creating' %
                     (tpg, initiator_name))
            acl = acl_list[initiator_name]
        else:
            Log.info('creating acl (%s, %s)' % (tpg, initiator_name))
            acl = NodeACL(tpg, initiator_name)
            acl_list[initiator_name] = acl
        return acl

    # Create mapped lun, if needed. NOTE(review): "exists" is decided only
    # by whether the ACL has *any* mapped lun, not by (num, lun) — confirm
    # that a single mapped lun per ACL is the intended invariant.
    def create_mapped_lun(self, acl, num, lun):
        mapped_lun = None
        if not list(acl.mapped_luns):
            Log.info('creating mapped lun (%s, %s, %s)' % (acl, num, lun))
            mapped_lun = MappedLUN(acl, num, lun)
        else:
            Log.info('mapped lun (%s, %s, %s) already exists' % (acl, num, lun))
        return mapped_lun
def ui_command_create(self, name, file_or_dev, size=None, write_back=None,
                      sparse=None, wwn=None):
    '''
    Creates a FileIO storage object.

    If I{file_or_dev} is a path to a regular file to be used as backend, then
    the I{size} parameter is mandatory. Else, if I{file_or_dev} is a path to a
    block device, the size parameter B{must} be omitted. If present, I{size}
    is the size of the file to be used, I{file} the path to the file or
    I{dev} the path to a block device. The I{write_back} parameter is a
    boolean controlling write caching. It is enabled by default. The
    I{sparse} parameter is only applicable when creating a new backing file.
    It is a boolean stating if the created file should be created as a sparse
    file (the default), or fully initialized.

    SIZE SYNTAX
    ===========
    - If size is an int, it represents a number of bytes.
    - If size is a string, the following units can be used:
        - B{B} or no unit present for bytes
        - B{k}, B{K}, B{kB}, B{KB} for kB (kilobytes)
        - B{m}, B{M}, B{mB}, B{MB} for MB (megabytes)
        - B{g}, B{G}, B{gB}, B{GB} for GB (gigabytes)
        - B{t}, B{T}, B{tB}, B{TB} for TB (terabytes)
    '''
    self.assert_root()

    sparse = self.ui_eval_param(sparse, 'bool', True)
    write_back = self.ui_eval_param(write_back, 'bool', True)
    wwn = self.ui_eval_param(wwn, 'string', None)

    self.shell.log.debug("Using params size=%s write_back=%s sparse=%s"
                         % (size, write_back, sparse))

    file_or_dev = os.path.expanduser(file_or_dev)
    # can't use is_dev_in_use() on files so just check against other
    # storage object paths
    if os.path.exists(file_or_dev):
        for so in RTSRoot().storage_objects:
            if so.udev_path and os.path.samefile(file_or_dev, so.udev_path):
                raise ExecutionError("storage object for %s already exists: %s" % \
                                     (file_or_dev, so.name))

    # Block device backend: the kernel reports the size, so a user-supplied
    # size is dropped rather than trusted.
    if get_block_type(file_or_dev) is not None:
        if size:
            self.shell.log.info("Block device, size parameter ignored")
            size = None
        self.shell.log.info(
            "Note: block backstore preferred for best results")
    else:
        # use given file size only if backing file does not exist
        if os.path.isfile(file_or_dev):
            # Existing file: its on-disk size always wins over the argument.
            new_size = os.path.getsize(file_or_dev)
            if size:
                self.shell.log.info(
                    "%s exists, using its size (%s bytes) instead"
                    % (file_or_dev, new_size))
            size = new_size
        elif os.path.exists(file_or_dev):
            # Path exists but is neither a regular file nor a block device
            # (e.g. a directory or fifo).
            raise ExecutionError("Path %s exists but is not a file"
                                 % file_or_dev)
        else:
            # create file and extend to given file size
            if not size:
                raise ExecutionError("Attempting to create file for new" +
                                     " fileio backstore, need a size")
            size = human_to_bytes(size)
            self._create_file(file_or_dev, size, sparse)

    so = FileIOStorageObject(name, file_or_dev, size,
                             write_back=write_back, wwn=wwn)
    ui_so = UIFileioStorageObject(so, self)
    self.setup_model_alias(so)
    self.shell.log.info("Created fileio %s with size %s" % (name, so.size))
    return self.new_node(ui_so)
class UIRoot(UINode):
    '''
    The targetcli hierarchy root node.
    '''

    def __init__(self, shell, as_root=False):
        '''
        shell: the ConfigShell driving the CLI.
        as_root: True when running privileged; gates detail output in
            ui_command_sessions and (via assert_root) mutating commands.
        '''
        UINode.__init__(self, '/', shell=shell)
        self.as_root = as_root
        # Cached rtslib root; all fabric/config queries go through it.
        self.rtsroot = RTSRoot()

    def refresh(self):
        '''
        Refreshes the tree of target fabric modules.
        '''
        self._children = set([])
        # Invalidate any rtslib caches
        if 'invalidate_caches' in dir(RTSRoot):
            self.rtsroot.invalidate_caches()
        UIBackstores(self)
        # only show fabrics present in the system
        # fm.wwns may legitimately be None (fabric accepts any wwn), so
        # identity comparison, not equality (was: fm.wwns == None).
        for fm in self.rtsroot.fabric_modules:
            if fm.wwns is None or any(fm.wwns):
                UIFabricModule(fm, self)

    def _compare_files(self, backupfile, savefile):
        '''
        Compare backupfile and saveconfig file; return True if their
        contents are identical, False otherwise (including when either
        file cannot be read).
        '''
        if (os.path.splitext(backupfile)[1] == '.gz'):
            try:
                with gzip.open(backupfile, 'rb') as fbkp:
                    fdata_bkp = fbkp.read()
            except IOError as e:
                self.shell.log.warning(
                    "Could not gzip open backupfile %s: %s"
                    % (backupfile, e.strerror))
                # Bug fix: previously fell through and raised NameError on
                # the comparison below; treat unreadable backup as different.
                return False
        else:
            try:
                with open(backupfile, 'rb') as fbkp:
                    fdata_bkp = fbkp.read()
            except IOError as e:
                self.shell.log.warning("Could not open backupfile %s: %s"
                                       % (backupfile, e.strerror))
                return False

        try:
            with open(savefile, 'rb') as f:
                fdata = f.read()
        except IOError as e:
            self.shell.log.warning("Could not open saveconfig file %s: %s"
                                   % (savefile, e.strerror))
            return False

        return fdata_bkp == fdata

    def _save_backups(self, savefile):
        '''
        Take backup of config-file if needed.
        '''
        # Only save backups if saving to default location
        if savefile != default_save_file:
            return

        backup_dir = os.path.dirname(savefile) + "/backup/"
        backup_name = "saveconfig-" + \
            datetime.now().strftime("%Y%m%d-%H:%M:%S") + "-json.gz"
        backupfile = backup_dir + backup_name
        backup_error = None

        if not os.path.exists(backup_dir):
            try:
                os.makedirs(backup_dir)
            except OSError as exe:
                raise ExecutionError("Cannot create backup directory [%s] %s."
                                     % (backup_dir, exe.strerror))

        # Only save backups if savefile exists
        if not os.path.exists(savefile):
            return

        backed_files_list = sorted(glob(os.path.dirname(savefile) +
                                        "/backup/saveconfig-*json*"))

        # Save backup if backup dir is empty, or savefile is different from
        # the most recent backup copy
        if not backed_files_list or not self._compare_files(
                backed_files_list[-1], savefile):
            try:
                with open(savefile, 'rb') as f_in, \
                        gzip.open(backupfile, 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
                    f_out.flush()
            except IOError as ioe:
                backup_error = ioe.strerror or "Unknown error"

            if backup_error is None:
                # remove excess backups
                max_backup_files = int(self.shell.prefs['max_backup_files'])

                # A system-wide prefs file may raise the per-user limit;
                # the larger of the two values wins.
                try:
                    with open(universal_prefs_file) as prefs:
                        backups = [
                            line for line in prefs.read().splitlines()
                            # raw string: '\s' is an invalid escape sequence
                            if re.match(r'^max_backup_files\s*=', line)
                        ]
                    if max_backup_files < int(
                            backups[0].split('=')[1].strip()):
                        max_backup_files = int(
                            backups[0].split('=')[1].strip())
                except Exception:
                    # Missing/unparseable prefs file is non-fatal (was a
                    # bare except:, which also swallowed SystemExit).
                    self.shell.log.debug("No universal prefs file '%s'."
                                         % universal_prefs_file)

                files_to_unlink = list(
                    reversed(backed_files_list))[max_backup_files - 1:]
                for f in files_to_unlink:
                    with ignored(IOError):
                        os.unlink(f)

                self.shell.log.info("Last %d configs saved in %s."
                                    % (max_backup_files, backup_dir))
            else:
                self.shell.log.warning("Could not create backup file %s: %s."
                                       % (backupfile, backup_error))

    def ui_command_saveconfig(self, savefile=default_save_file):
        '''
        Saves the current configuration to a file so that it can be restored
        on next boot.
        '''
        self.assert_root()

        if not savefile:
            savefile = default_save_file

        savefile = os.path.expanduser(savefile)
        self._save_backups(savefile)
        self.rtsroot.save_to_file(savefile)
        self.shell.log.info("Configuration saved to %s" % savefile)

    def ui_command_restoreconfig(self, savefile=default_save_file,
                                 clear_existing=False, target=None,
                                 storage_object=None):
        '''
        Restores configuration from a file.
        '''
        self.assert_root()
        savefile = os.path.expanduser(savefile)

        if not os.path.isfile(savefile):
            self.shell.log.info("Restore file %s not found" % savefile)
            return

        target = self.ui_eval_param(target, 'string', None)
        storage_object = self.ui_eval_param(storage_object, 'string', None)
        errors = self.rtsroot.restore_from_file(savefile, clear_existing,
                                                target, storage_object)
        # Rebuild the UI tree to reflect the restored configuration.
        self.refresh()

        if errors:
            raise ExecutionError("Configuration restored, %d recoverable errors:\n%s" % \
                                 (len(errors), "\n".join(errors)))

        self.shell.log.info("Configuration restored from %s" % savefile)

    def ui_complete_saveconfig(self, parameters, text, current_param):
        '''
        Auto-completes the file name
        '''
        if current_param != 'savefile':
            return []
        completions = complete_path(text, stat.S_ISREG)
        # Single completion that is not a directory: append a space so the
        # shell treats the argument as finished.
        if len(completions) == 1 and not completions[0].endswith('/'):
            completions = [completions[0] + ' ']
        return completions

    ui_complete_restoreconfig = ui_complete_saveconfig

    def ui_command_clearconfig(self, confirm=None):
        '''
        Removes entire configuration of backstores and targets
        '''
        self.assert_root()
        confirm = self.ui_eval_param(confirm, 'bool', False)
        self.rtsroot.clear_existing(confirm=confirm)
        self.shell.log.info("All configuration cleared")
        self.refresh()

    def ui_command_version(self):
        '''
        Displays the targetcli and support libraries versions.
        '''
        from targetcli import __version__ as targetcli_version
        self.shell.log.info("targetcli version %s" % targetcli_version)

    def ui_command_sessions(self, action="list", sid=None):
        '''
        Displays a detailed list of all open sessions.

        PARAMETERS
        ==========

        action
        ------
        The action is one of:
            - `list`` gives a short session list
            - `detail` gives a detailed list

        sid
        ---
        You can specify an "sid" to only list this one, with or without
        details.

        SEE ALSO
        ========
        status
        '''
        indent_step = 4
        base_steps = 0
        action_list = ("list", "detail")

        if action not in action_list:
            raise ExecutionError("action must be one of: %s" %
                                 ", ".join(action_list))

        if sid is not None:
            try:
                int(sid)
            except ValueError:
                raise ExecutionError("sid must be a number, '%s' given" % sid)

        def indent_print(text, steps):
            # Render one line at the requested indent depth.
            console = self.shell.con
            console.display(console.indent(text, indent_step * steps),
                            no_lf=True)

        def print_session(session):
            acl = session['parent_nodeacl']
            indent_print("alias: %(alias)s\tsid: %(id)i type: " \
                         "%(type)s session-state: %(state)s" % session,
                         base_steps)

            if action == 'detail':
                # Authentication state is only shown to root, since reading
                # it requires privileged access to the ACL.
                if self.as_root:
                    if acl.authenticate_target:
                        auth = " (authenticated)"
                    else:
                        auth = " (NOT AUTHENTICATED)"
                else:
                    auth = ""

                indent_print("name: %s%s" % (acl.node_wwn, auth),
                             base_steps + 1)

                for mlun in acl.mapped_luns:
                    plugin = mlun.tpg_lun.storage_object.plugin
                    name = mlun.tpg_lun.storage_object.name
                    if mlun.write_protect:
                        mode = "r"
                    else:
                        mode = "rw"
                    indent_print(
                        "mapped-lun: %d backstore: %s/%s mode: %s"
                        % (mlun.mapped_lun, plugin, name, mode),
                        base_steps + 1)

                for connection in session['connections']:
                    indent_print("address: %(address)s (%(transport)s) cid: " \
                                 "%(cid)i connection-state: %(cstate)s" % \
                                 connection, base_steps + 1)

        if sid:
            printed_sessions = [x for x in self.rtsroot.sessions
                                if x['id'] == int(sid)]
        else:
            printed_sessions = list(self.rtsroot.sessions)

        if printed_sessions:
            for session in printed_sessions:
                print_session(session)
        else:
            if sid is None:
                indent_print("(no open sessions)", base_steps)
            else:
                raise ExecutionError("no session found with sid %i" % int(sid))
def __init__(self, shell, as_root=False):
    '''
    Initialize the root UI node.

    shell: the ConfigShell instance driving the CLI.
    as_root: True when the shell runs with root privileges.
    '''
    UINode.__init__(self, '/', shell=shell)
    self.as_root = as_root
    # Cached rtslib root object used for all target/config queries.
    self.rtsroot = RTSRoot()
def refresh(self):
    """Rebuild the child UI nodes for every storage object of this plugin."""
    self._children = set([])
    # Instantiating the UI wrapper registers it as a child of this node.
    matching = (obj for obj in RTSRoot().storage_objects
                if obj.plugin == self.name)
    for obj in matching:
        ui_so = self.so_cls(obj, self)