def set_smb_conf(self, config=None, operator="unkown", **kwargs):
    """Update the global samba configuration.

    config/kwargs hold the option names and new values; a None value (or a
    key absent from the current conf) is ignored.  "share_list" cannot be
    changed through this method.

    Raises StorLeverError on a non-dict config, an unknown guest_account,
    or a schema validation failure.
    """
    # BUG FIX: the original used a mutable default (config={}) and then
    # mutated it with config.update(kwargs), leaking kwargs into every
    # later call.  Use None as the default and copy before merging so
    # neither the default nor the caller's dict is mutated.
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise StorLeverError("Parameter type error", 500)
    if len(config) == 0 and len(kwargs) == 0:
        return
    config = dict(config)
    config.update(kwargs)

    # share_list must be managed through the dedicated share APIs
    not_allowed_keys = ("share_list", )
    config = filter_dict(config, not_allowed_keys, True)

    # the guest account must be an existing system user
    if "guest_account" in config and config["guest_account"] is not None:
        try:
            user_mgr().get_user_info_by_name(config["guest_account"])
        except Exception:
            raise StorLeverError("guest_account does not exist", 400)

    with self.lock:
        smb_conf = self._load_conf()
        for name, value in config.items():
            if name == "share_list":
                continue
            if name in smb_conf and value is not None:
                smb_conf[name] = value

        # check config conflict
        smb_conf = self.smb_conf_schema.validate(smb_conf)

        # save new conf
        self._save_conf(smb_conf)
        self._sync_to_system_conf(smb_conf)
        logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                   "Samba config is updated by user(%s)" % (operator))
def add_intermediate_node(self, node_id, parent_id, text, uri, require_js):
    """Register a new intermediate menu node and wire it into the tree.

    Existing leaf nodes that already name this node as their parent are
    adopted as sub nodes; the node is attached to its parent only when the
    parent is a registered root node.
    """
    new_node = MenuNode(node_id, parent_id, "intermediate", text, uri, require_js)
    with self.lock:
        # refuse a second node with the same id
        if node_id in self.nodes:
            raise StorLeverError("node_id already exist", 400)

        # adopt the already-registered children of this new node
        for child in self.nodes.values():
            if child.parent_id != node_id:
                continue
            if child.node_type != "leaf":
                raise StorLeverError("intermediate node (%s) cannot be sub node of "
                                     "another intermediate node (%s)" %
                                     (child.node_id, new_node.node_id), 400)
            new_node.add_sub_node(child)

        # attach the new node to its parent if that parent already exists
        parent = self.nodes.get(new_node.parent_id)
        if parent is not None:
            # an intermediate node may only hang off a root node
            if parent.node_type != "root":
                raise StorLeverError("intermediate node (%s) cannot be sub node of "
                                     "an non-root node (%s)" %
                                     (new_node.node_id, parent.node_id), 400)
            parent.add_sub_node(new_node)

        self.nodes[new_node.node_id] = new_node
def raise_from_error(self, info=''):
    """Raise StorLeverError, appending the LVM library's error text when a
    library handler is available."""
    if self._hdlr is None:
        raise StorLeverError(info)
    lvm_msg = lvm_errmsg(self._hdlr)
    if info:
        raise StorLeverError(info + '\n' + lvm_msg)
    raise StorLeverError(lvm_msg)
def mod_dir_owner(self, relative_path, user=None, group=None, operator="unknown"):
    """Change the owner of a directory under this filesystem's mount point.

    relative_path is resolved against the mount point; user/group of None
    leave the respective id unchanged (os.chown treats -1 as "keep").

    Raises StorLeverError when the filesystem is unavailable, the path is
    absolute or contains "."/".." components, or the directory is missing.
    """
    if not self.is_available():
        raise StorLeverError("File system is unavailable", 500)
    # BUG FIX: the original tested `"." in relative_path`, a substring
    # check that rejected any legitimate name containing a dot (and made
    # the ".." test unreachable).  Check path components instead, which
    # still blocks traversal via "." or "..".
    if any(part in (".", "..") for part in relative_path.split("/")):
        raise StorLeverError("name cannot include . or ..", 400)
    if relative_path.startswith("/"):
        raise StorLeverError("name must be a relative path name", 400)
    path = os.path.join(self.fs_conf["mount_point"], relative_path)
    if not os.path.exists(path):
        raise StorLeverError("Share directory not found", 404)

    umgr = user_mgr()
    # -1 tells os.chown to leave that id untouched
    if user is None:
        uid = -1
    else:
        uid = umgr.get_user_info_by_name(user)["uid"]
    if group is None:
        gid = -1
    else:
        gid = umgr.get_group_by_name(group)["gid"]
    os.chown(path, uid, gid)

    logger.log(
        logging.INFO, logger.LOG_TYPE_CONFIG,
        "Share directory (%s) owner is changed to (%s:%s)"
        " by user(%s)" % (path, user, group, operator))
def set_export_conf(self, name, path=None, clients=None, operator="unkown"):
    """Modify an existing NFS export entry; None arguments keep the
    current value of that field."""
    if path is not None and path != "" and not os.path.exists(path):
        raise StorLeverError("path(%s) does not exists" % (path), 400)

    with self.lock:
        nfs_conf = self._load_conf()
        export_list = nfs_conf["export_point_list"]

        # locate the export entry by name
        target_index = None
        for idx, entry in enumerate(export_list):
            if entry["name"] == name:
                target_index = idx
                break
        if target_index is None:
            raise StorLeverError("export(%s) not found" % (name), 404)

        entry = export_list[target_index]
        if path is not None:
            entry["path"] = path
        if clients is not None:
            entry["clients"] = clients
        export_list[target_index] = \
            self.export_point_conf_schema.validate(entry)

        # save new conf
        self._save_conf(nfs_conf)
        self._sync_to_system_conf(nfs_conf)
        logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                   "NFS export (name:%s) config is updated by operator(%s)" %
                   (name, operator))
def append_export_conf(self, name, path="/", clients=None, operator="unkown"):
    """Add a new NFS export point with the given name, path and client list.

    Raises StorLeverError when the path does not exist or when an export
    with the same name or path is already configured.
    """
    # BUG FIX: the original used a mutable default argument (clients=[]);
    # the shared list ended up referenced by every export created without
    # an explicit client list.
    if clients is None:
        clients = []
    if path != "" and not os.path.exists(path):
        raise StorLeverError("path(%s) does not exists" % (path), 400)

    new_export_point = {
        "name": name,
        "path": path,
        "clients": clients,
    }
    new_export_point = self.export_point_conf_schema.validate(new_export_point)

    with self.lock:
        nfs_conf = self._load_conf()

        # check duplication of both path and name
        for point in nfs_conf["export_point_list"]:
            if path == point["path"]:
                raise StorLeverError("export with path(%s) already in nfs export table" % (path), 400)
            if name == point["name"]:
                raise StorLeverError("export with name(%s) already in nfs export table" % (name), 400)

        nfs_conf["export_point_list"].append(new_export_point)

        # save new conf
        self._save_conf(nfs_conf)
        self._sync_to_system_conf(nfs_conf)
        logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                   "NFS export with path(%s) config is added by operator(%s)" %
                   (path, operator))
def set_mail_conf(self, config=None, operator="unkown", *args, **kwargs):
    """Update the mail notification configuration.

    config/kwargs hold option names and new values; None values and keys
    not present in the current conf are ignored.  When smtp_server is set,
    email_addr and password must be non-empty.
    """
    # BUG FIX: the original used a mutable default (config={}) and then
    # mutated it with config.update(kwargs), so kwargs leaked into the
    # shared default dict across calls.  Copy before merging instead.
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise StorLeverError("Parameter type error", 500)
    if len(config) == 0 and len(kwargs) == 0:
        return
    config = dict(config)
    config.update(kwargs)

    with self.lock:
        mail_conf = self._load_conf()
        for name, value in config.items():
            if name in mail_conf and value is not None:
                mail_conf[name] = value

        # check config conflict
        mail_conf = self.mail_conf_schema.validate(mail_conf)
        if mail_conf["smtp_server"] != "":
            if mail_conf["email_addr"] == "":
                raise StorLeverError(
                    "email_addr cannot be empty if smtp_server exists", 400)
            if mail_conf["password"] == "":
                raise StorLeverError(
                    "password cannot be empty if smtp_server exists", 400)

        # save new conf
        self._save_conf(mail_conf)
        self._sync_to_system_conf(mail_conf)
        logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                   "Mail config is updated by operator(%s)" % (operator))
def add_fs(self, fs_name, type, dev_file, mount_option="",
           check_onboot=False, comment="", user="******"):
    """Register a filesystem with storlever.

    The filesystem is mounted under /mnt/FS_NAME and recorded in
    storlever's filesystem configuration.
    """
    if type not in self.support_fs_type:
        raise StorLeverError("type(%s) does not support" % type, 400)

    # make sure the mount point directory exists
    mount_point = os.path.join(MOUNT_DIR, fs_name)
    if not os.path.exists(mount_point):
        os.makedirs(mount_point)
    elif not os.path.isdir(mount_point):
        raise StorLeverError("mount point(%s) already exists and is not directory" % mount_point)

    # A network filesystem's dev file is a network identifier rather than
    # a local file, so its existence is deliberately not enforced here.
    dev_uuid = ""
    if (not dev_file.startswith("/dev/mapper")) and os.path.exists(dev_file):
        dev_uuid = self._dev_file_to_uuid(dev_file)

    fs_conf = self.fs_conf_schema.validate({
        "type": type,
        "dev_file": dev_file,
        "dev_uuid": dev_uuid,
        "mount_point": mount_point,
        "mount_option": mount_option,
        "check_onboot": check_onboot,
        "comment": comment
    })

    with self.lock:
        fs_dict = self._load_conf()
        if fs_name in fs_dict:
            raise StorLeverError("filesystem(%s) already exist" % fs_name, 400)

        # mount first: a mount failure leaves the config untouched
        self._mount_fs(fs_name, fs_conf)

        fs_dict[fs_name] = fs_conf
        self._save_conf(fs_dict)
        self._sync_to_fstab(fs_dict)
        logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                   "New filesystem %s (dev:%s, mount_point:%s, option:%s) "
                   "is added by user(%s)" %
                   (fs_name, dev_file, mount_point, mount_option, user))
def set_share_conf(self, share_name, path=None, comment=None,
                   create_mask=None, directory_mask=None, guest_ok=None,
                   read_only=None, browseable=None, force_create_mode=None,
                   force_directory_mode=None, valid_users=None,
                   write_list=None, veto_files=None, operator="unkown"):
    """Update fields of an existing samba share.

    A None argument leaves the corresponding field unchanged.

    Raises StorLeverError when the path does not exist or the share is
    not configured.
    """
    if path is not None and path != "" and not os.path.exists(path):
        raise StorLeverError("path(%s) does not exists" % (path), 400)

    # conf key -> requested new value (None means "keep current value");
    # replaces the original's twelve copy-pasted if-statements
    updates = {
        "path": path,
        "comment": comment,
        "create_mask": create_mask,
        "directory_mask": directory_mask,
        "guest_ok": guest_ok,
        "read_only": read_only,
        "browseable": browseable,
        "force_create_mode": force_create_mode,
        "force_directory_mode": force_directory_mode,
        "valid_users": valid_users,
        "write_list": write_list,
        "veto_files": veto_files,
    }

    with self.lock:
        smb_conf = self._load_conf()
        share_conf = smb_conf["share_list"].get(share_name)
        if share_conf is None:
            # BUG FIX: the original formatted share_conf (always None on
            # this branch) into the message instead of the share name
            raise StorLeverError("share_conf(%s) not found" % (share_name), 404)

        for key, value in updates.items():
            if value is not None:
                share_conf[key] = value

        # save new conf
        self._save_conf(smb_conf)
        self._sync_to_system_conf(smb_conf)
        logger.log(
            logging.INFO, logger.LOG_TYPE_CONFIG,
            "Samba share (%s) config is updated by operator(%s)" %
            (share_name, operator))
def smart_test(self, scsi_id, test_type):
    """Start a SMART self-test of the given type on this device."""
    supported_tests = ("offline", "short", "long", "conveyance")
    if test_type not in supported_tests:
        raise StorLeverError("test_type (%s) Not Support" % test_type, 400)
    if self.dev_file == "":
        raise StorLeverError(
            "scsi_id (%s) has not be recognized" % self.scsi_id, 400)
    check_output([SMARTCTL_CMD, "-t", test_type,
                  "-T", "verypermissive", self.dev_file])
def _parse_dev_file(dev_file): dev_file = str(dev_file) if ":" not in dev_file: raise StorLeverError("dev_file is not nfs source format (IP:Path)", 400) ip, sep, path = dev_file.partition(":") ip = ip.strip() path = path.strip() if len(ip) == 0 or len(path) == 0: raise StorLeverError("dev_file is not nfs source format (IP:Path)", 400) return ip, path
def update_monitor_conf(self, monitor_name, expression=None, option=None,
                        operator="unknown"):
    """Modify an existing SNMP monitor entry; None arguments keep the
    current values."""
    with self.lock:
        snmp_conf = self._load_conf()
        monitor_list = snmp_conf["monitor_list"]

        # locate the monitor entry by name
        found_index = None
        for idx, entry in enumerate(monitor_list):
            if entry["monitor_name"] == monitor_name:
                found_index = idx
                break
        if found_index is None:
            raise StorLeverError("Monitor (%s) Not Found" % (monitor_name), 404)

        entry = monitor_list[found_index]
        if expression is not None:
            entry["expression"] = expression
        if option is not None:
            entry["option"] = option
        monitor_list[found_index] = \
            self.monitor_conf_schema.validate(entry)

        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)
        logger.log(
            logging.INFO, logger.LOG_TYPE_CONFIG,
            "SNMP Monitor (%s) config is updated by operator(%s)" %
            (monitor_name, operator))
def del_incominguser(self, name, operator="unkown"):
    """Remove an incoming CHAP user from this tgt target's configuration."""
    with self.mgr.lock:
        conf = self.mgr._get_target_conf(self.iqn)

        # entries are "user:password" strings; remember the last match
        matched = None
        for entry in conf["incominguser_list"]:
            if entry.partition(":")[0] == name:
                matched = entry
        if matched is None:
            raise StorLeverError(
                "tgt target (iqn:%s) incominguser (%s) Not Found" %
                (self.iqn, name), 404)
        conf["incominguser_list"].remove(matched)

        self.mgr._set_target_conf(self.iqn, conf)
        self.conf = conf  # update the cache target conf
        self._update_target()
        logger.log(
            logging.INFO, logger.LOG_TYPE_CONFIG,
            "tgt target (iqn:%s) incominguser (%s) is deleted by operator(%s)" %
            (self.iqn, name, operator))
def refresh_property(self):
    """Re-read this block device's properties from lsblk output."""
    output = check_output([
        LSBLK_CMD, "-ribn",
        "-o", "NAME,MAJ:MIN,TYPE,SIZE,RO,FSTYPE,MOUNTPOINT",
        self.dev_file
    ]).splitlines()
    if not output:
        raise StorLeverError(
            "Device (%s) has been removed from system" % self.dev_file, 404)

    # only the first line describes the device itself
    fields = output[0].split(" ")
    major_str, _, minor_str = fields[1].partition(":")

    self.name = fields[0]
    self.major = int(major_str)
    self.minor = int(minor_str)
    self.size = int(fields[3])
    self.type = fields[2]
    self.readonly = int(fields[4]) != 0
    self.fs_type = fields[5]
    self.mount_point = fields[6]
def post_tgt_target_lun_list(request):
    """REST handler: create a new LUN on the tgt target named by the URL."""
    iqn = request.matchdict['target_iqn']
    target = tgtmgr.TgtManager.get_target_by_iqn(iqn)

    lun_params = get_params_from_request(request, lun_conf_schema)
    if "path" not in lun_params:
        raise StorLeverError("New LUN's path must be configured", 400)

    target.add_lun(lun_params["lun"],
                   lun_params["path"],
                   lun_params.get("device_type", "disk"),
                   lun_params.get("bs_type", "rdwr"),
                   lun_params.get("direct_map", False),
                   lun_params.get("write_cache", True),
                   lun_params.get("readonly", False),
                   lun_params.get("online", True),
                   lun_params.get("scsi_id", ""),
                   lun_params.get("scsi_sn", ""),
                   operator=request.client_addr)

    # 201 Created with a Location header pointing at the new LUN resource
    resp = Response(status=201)
    resp.location = request.route_url('tgt_target_lun_info',
                                      target_iqn=iqn,
                                      lun_number=lun_params["lun"])
    return resp
def get_peer_list(self):
    """Parse `ntpq -pn` output into a list of peer-status dicts."""
    peers = []
    lines = check_output([NTPQ_CMD, '-pn']).splitlines()
    # the first two lines are the table header
    for line in lines[2:]:
        fields = line[1:].split()
        if len(fields) < 10:
            raise StorLeverError("ntpq output format cannot be regonized"
                                 , 500)
        try:
            when = int(fields[4])
        except Exception:
            when = 0  # non-numeric "when" column falls back to 0
        peers.append({
            "state": line[0],      # the leading tally character
            "remote": fields[0],
            "refid": fields[1],
            "stratum": int(fields[2]),
            "type": fields[3],
            "when": when,
            "poll": int(fields[5]),
            "reach": int(fields[6], 8),  # reach register is octal
            "delay": float(fields[7]),
            "offset": float(fields[8]),
            "jitter": float(fields[9])
        })
    return peers
def get_interface_by_name(self, name):
    """Look up a network interface by name and wrap it in EthInterface."""
    # the loopback interface is deliberately not managed
    if name == "lo":
        raise StorLeverError("Interface(%s) cannot support" % name, 404)
    dev = ifconfig.findif(name, False)
    if dev is None:
        raise StorLeverError("Interface(%s) does not exist" % name, 404)
    encap = self._get_if_encap(dev.name)
    # only encap type 1 is accepted — presumably ethernet; confirm
    # against _get_if_encap's semantics
    if encap != 1:
        raise StorLeverError(
            "Interface(%s)'s type(%d) is not supported by storlever" %
            (name, encap), 400)
    return EthInterface(dev.name)
def quota_group_report(self):
    """Parse `repquota -gpv` output into a list of per-group quota dicts."""
    if not self.is_available():
        raise StorLeverError("File system is unavailable", 500)

    report = []
    lines = check_output(
        [REPQUOTA_BIN, "-gpv", self.fs_conf["mount_point"]]).splitlines()

    divider_re = re.compile(r"^(-)+$")       # dashed rule preceding the table body
    status_re = re.compile(r"^[-\+][-\+]$")  # block/inode over-limit flag column

    in_table = False
    for line in lines:
        if not in_table:
            # skip the header until the dashed divider line
            if divider_re.match(line) is not None:
                in_table = True
            continue
        # any malformed or empty line ends the table
        if len(line) == 0:
            break
        cols = line.split()
        if len(cols) < 10:
            break
        if status_re.match(cols[1]) is None:
            break
        report.append({
            "name": cols[0],
            "block_used": int(cols[2]),
            "block_softlimit": int(cols[3]),
            "block_hardlimit": int(cols[4]),
            "inode_used": int(cols[6]),
            "inode_softlimit": int(cols[7]),
            "inode_hardlimit": int(cols[8])
        })
    return report
def del_fs(self, fs_name, user="******"):
    """Delete a filesystem from storlever.

    The entry is removed from storlever's config file and the filesystem
    is unmounted from the system.
    """
    with self.lock:
        fs_dict = self._load_conf()
        if fs_name not in fs_dict:
            raise StorLeverError("filesystem(%s) does not exist" % fs_name, 400)
        fs_conf = fs_dict.pop(fs_name)

        # unmount first: an unmount failure propagates and leaves the
        # config file untouched
        self._umount_fs(fs_name, fs_conf)

        self._save_conf(fs_dict)
        self._sync_to_fstab(fs_dict)

        # best-effort removal of the now-unused mount point directory
        try:
            os.rmdir(fs_conf["mount_point"])
        except OSError:
            pass

        logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                   "filesystem %s (dev:%s, mount_point:%s, option:%s) "
                   "is deleted by user(%s)" %
                   (fs_name, fs_conf['dev_file'], fs_conf['mount_point'],
                    fs_conf['mount_option'], user))
def add_monitor_conf(self, monitor_name, expression, option="",
                     operator="unknown"):
    """Append a new SNMP monitor entry to the configuration."""
    new_entry = self.monitor_conf_schema.validate({
        "monitor_name": monitor_name,
        "expression": expression,
        "option": option,
    })

    with self.lock:
        snmp_conf = self._load_conf()
        monitor_list = snmp_conf["monitor_list"]

        # reject duplicate monitor names
        for existing in monitor_list:
            if existing["monitor_name"] == monitor_name:
                raise StorLeverError(
                    "monitor_name (%s) Already exist" % monitor_name, 400)

        monitor_list.append(new_entry)

        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)
        logger.log(
            logging.INFO, logger.LOG_TYPE_CONFIG,
            "SNMP New Monitor (%s) is added by operator(%s)" %
            (monitor_name, operator))
def quota_group_set(self, group, block_softlimit=0, block_hardlimit=0,
                    inode_softlimit=0, inode_hardlimit=0,
                    operator="unknown"):
    """Set block and inode quota limits for a group on this filesystem.

    A limit of 0 is passed straight through to setquota.
    """
    if not self.is_available():
        raise StorLeverError("File system is unavailable", 500)

    check_output([
        SETQUOTA_BIN, "-g", group,
        str(block_softlimit), str(block_hardlimit),
        str(inode_softlimit), str(inode_hardlimit),
        self.fs_conf["mount_point"]
    ])

    logger.log(
        logging.INFO, logger.LOG_TYPE_CONFIG,
        "File System(%s) quota for group(%s) is changed to "
        "(%d,%d,%d,%d)"
        " by user(%s)" %
        (self.name, group, block_softlimit, block_hardlimit,
         inode_softlimit, inode_hardlimit, operator))
def __init__(self, lvm, name, mode=MODE_READ):
    """Open (MODE_READ/MODE_WRITE) or create (MODE_NEW) a VG handler on
    the given LVM library handle."""
    self._lvm = lvm
    if not lvm or not lvm.hdlr:
        raise StorLeverError('')
    self.name = name
    self._mode = mode

    if mode == self.MODE_NEW:
        self._hdlr = lvm_vg_create(self._lvm.hdlr, self.name)
    elif mode in (self.MODE_READ, self.MODE_WRITE):
        access = 'r' if mode == self.MODE_READ else 'w'
        self._hdlr = lvm_vg_open(self._lvm.hdlr, self.name, access, 0)
    else:
        raise StorLeverError('Unknown VG operation mode')

    if not bool(self._hdlr):
        self.raise_from_error(info='Failed to open VG handler')
def get_lun_by_num(self, lun_num):
    """Return the LUN conf dict with the given number, or raise a 404
    StorLeverError when no such LUN exists on this target."""
    missing = object()
    found = next((entry for entry in self.conf["lun_list"]
                  if entry["lun"] == lun_num), missing)
    if found is missing:
        raise StorLeverError(
            "Target (iqn:%s) Lun (%d)Not Found" % (self.iqn, lun_num), 404)
    return found
def set_smart_config(self, smart_enabled=None, auto_offline_enabled=None):
    """Enable/disable SMART support and automatic offline data collection.

    A None argument leaves that setting untouched.
    """
    if self.dev_file == "":
        raise StorLeverError(
            "scsi_id (%s) has not be recognized" % self.scsi_id, 400)

    if smart_enabled is not None:
        check_output([SMARTCTL_CMD, "-s",
                      "on" if smart_enabled else "off",
                      "-T", "verypermissive", self.dev_file])

    if auto_offline_enabled is not None:
        check_output([SMARTCTL_CMD, "-o",
                      "on" if auto_offline_enabled else "off",
                      "-T", "verypermissive", self.dev_file])
def set_state(self, state, operator="unkown"):
    """Switch this tgt target to the "offline" or "ready" state."""
    option_by_state = {"offline": "--offline", "ready": "--ready"}
    if state not in option_by_state:
        raise StorLeverError("state (%s) is not supported" % (state), 400)
    check_output([TGTADMIN_CMD, option_by_state[state], self.iqn])
def get_smart_info(self):
    """Collect SMART status flags and the detailed smartctl report."""
    if self.dev_file == "":
        raise StorLeverError(
            "scsi_id (%s) has not be recognized" % self.scsi_id, 400)

    info_output = check_output(
        [SMARTCTL_CMD, "-i", "-T", "verypermissive", self.dev_file])
    smart_enabled = "SMART support is: Enabled" in info_output

    full_output = check_output(
        [SMARTCTL_CMD, "-a", "-T", "verypermissive", self.dev_file])
    auto_offline_enabled = \
        "Auto Offline Data Collection: Enabled" in full_output

    # strip the leading banner: everything up to the first empty line
    # (falls back to dropping only the first line when none is found)
    lines = full_output.splitlines()
    banner_end = 0
    for idx, line in enumerate(lines):
        if line == "":
            banner_end = idx
            break
    detail = "\n".join(lines[banner_end + 1:])

    return {
        "smart_enabled": smart_enabled,
        "auto_offline_enabled": auto_offline_enabled,
        "detail": detail
    }
def __init__(self, vg, name=None, _lv=None):
    """Build an LV wrapper either by looking up *name* inside the VG, or
    directly from an already-opened low-level _lv object.

    Raises StorLeverError when neither a name nor an _lv is given.
    """
    self.lock = vg.lock
    self.vg = vg

    if name:
        # resolve the low-level LV object by name through a fresh handle
        with _LVM() as _lvm:
            with _VG(_lvm, self.vg.name) as _vg:
                _lv = _vg.get_lv_by_name(name)
    elif not _lv:
        raise StorLeverError('No LV name given')

    # Single attribute-snapshot block: the original duplicated these
    # assignments verbatim in both the name and _lv branches.
    self.name = _lv.name
    self.uuid = _lv.uuid
    self.size = _lv.size
    self.is_activate = _lv.is_activate()
    self.origin = _lv.get_origin()
    self.attr = _lv.get_attr()
    if self.origin:
        # divide out the library's 1e6 scaling of snap_percent
        self.snap_percent = _lv.get_property('snap_percent') / 1000000.0
    else:
        self.snap_percent = None

    self.type = self.TYPE_ABBR.get(self.attr[0], 'unknown')
    self.permission = self.PERMISSION_ABBR.get(self.attr[1], 'unknown')
    self.partial = self.attr[8] == 'p'
def get_module_info(self, module_name):
    """Return the info/state dict of a managed module.

    Each rpm and extra-file dependency is reported with an "installed"
    flag (rpm query success / file existence).
    """
    if module_name not in self.managed_modules:
        raise StorLeverError("Module(%s) Not Found" % (module_name), 404)
    module_conf = self.managed_modules[module_name]

    requires = []
    for rpm in module_conf["rpms"]:
        # a failing `rpm -q` means the package is absent
        try:
            check_output([RPM_CMD, "-q", rpm])
            rpm_installed = True
        except StorLeverCmdError:
            rpm_installed = False
        requires.append({
            "name": rpm,
            "type": "rpm",
            "installed": rpm_installed
        })
    for file_path in module_conf["extra_files"]:
        requires.append({
            "name": file_path,
            "type": "file",
            "installed": os.path.exists(file_path)
        })

    return {
        "module_name": module_conf["module_name"],
        "requires": requires,
        "comment": module_conf["comment"],
    }
def set_agent_conf(self, config=None, operator="unkown", *args, **kwargs):
    """Update the zabbix agent configuration.

    config/kwargs hold option names and new values; None values and keys
    absent from the current conf are ignored.  The server lists are
    managed elsewhere and cannot be changed here.
    """
    # BUG FIX: the original used a mutable default (config={}) and then
    # mutated it with config.update(kwargs), leaking kwargs into every
    # later call.  Use None as the default and copy before merging.
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise StorLeverError("Parameter type error", 500)
    if len(config) == 0 and len(kwargs) == 0:
        return
    config = dict(config)
    config.update(kwargs)

    # server lists must be managed through their dedicated APIs
    not_allow_keys = ("active_check_server_list",
                      "passive_check_server_list")
    config = filter_dict(config, not_allow_keys, True)

    with self.lock:
        zabbix_agent_conf = self._load_conf()
        for name, value in config.items():
            if name in zabbix_agent_conf and value is not None:
                zabbix_agent_conf[name] = value

        # check config conflict
        zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(
            zabbix_agent_conf)

        # save new conf
        self._save_conf(zabbix_agent_conf)
        self._sync_to_system_conf(zabbix_agent_conf)
        logger.log(
            logging.INFO, logger.LOG_TYPE_CONFIG,
            "Zabbix agent config is updated by operator(%s)" % (operator))
def remove_target_by_iqn(self, iqn, operator="unkown"):
    """Delete a tgt target from the config and tear it down in tgtd."""
    with self.lock:
        tgt_conf = self._load_conf()

        # find the target entry with the requested iqn (last match wins)
        matched = None
        for entry in tgt_conf["target_list"]:
            if entry["iqn"] == iqn:
                matched = entry
        if matched is None:
            raise StorLeverError("tgt target (iqn:%s) Not Found" % (iqn), 404)
        tgt_conf["target_list"].remove(matched)

        # save new conf
        self._save_conf(tgt_conf)
        self._sync_to_system_conf(tgt_conf)

        # best effort: the target may not currently be live in tgtd
        try:
            check_output([TGTADMIN_CMD, "-f", "--delete", iqn])
        except StorLeverError:
            pass

        logger.log(
            logging.INFO, logger.LOG_TYPE_CONFIG,
            "tgt target (iqn:%s) is deleted by operator(%s)" %
            (iqn, operator))