def add_monitor_conf(self, monitor_name, expression, option="", operator="unknown"):
    new_monitor_conf = {
        "monitor_name": monitor_name,
        "expression": expression,
        "option": option,
    }
    new_monitor_conf = self.monitor_conf_schema.validate(new_monitor_conf)

    with self.lock:
        snmp_conf = self._load_conf()
        monitor_list = snmp_conf["monitor_list"]

        # check duplicate
        for monitor_conf in monitor_list:
            if monitor_conf["monitor_name"] == monitor_name:
                raise StorLeverError("monitor_name (%s) already exists" % monitor_name, 400)

        monitor_list.append(new_monitor_conf)

        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "SNMP New Monitor (%s) is added by operator(%s)" %
               (monitor_name, operator))
def set_selinux_state(self, state, user="******"):
    state_str_to_int = {
        "enforcing": 1,
        "permissive": 0,
        "disabled": 0
    }
    param = state_str_to_int.get(state)
    if param is not None:
        old_state = check_output(["/usr/sbin/getenforce"]).lower().strip()
        if old_state != "disabled":
            check_output(["/usr/sbin/setenforce", str(param)])

    if not os.path.exists(SELINUX_CONF_DIR):
        os.makedirs(SELINUX_CONF_DIR)
    conf_path = os.path.join(SELINUX_CONF_DIR, SELINUX_CONF_FILE)

    conf = properties()
    conf.delete("SELINUX")
    conf.apply_to(conf_path)

    with open(conf_path, "r") as f:
        content = f.read()
    if content.endswith("\n") or len(content) == 0:
        content += "SELINUX=%s\n" % state
    else:
        content += "\nSELINUX=%s\n" % state
    with open(conf_path, "w") as f:
        f.write(content)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "selinux state is set to %s by user(%s)" % (state, user))
def remove_target_by_iqn(self, iqn, operator="unknown"):
    with self.lock:
        tgt_conf = self._load_conf()
        delete_conf = None
        for target_conf in tgt_conf["target_list"]:
            if target_conf["iqn"] == iqn:
                delete_conf = target_conf

        if delete_conf is None:
            raise StorLeverError("tgt target (iqn:%s) Not Found" % (iqn), 404)
        else:
            tgt_conf["target_list"].remove(delete_conf)

        # save new conf
        self._save_conf(tgt_conf)
        self._sync_to_system_conf(tgt_conf)

        try:
            check_output([TGTADMIN_CMD, "-f", "--delete", iqn])
        except StorLeverError:
            pass

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "tgt target (iqn:%s) is deleted by operator(%s)" %
               (iqn, operator))
def set_ip_config(self, ip=None, netmask=None, gateway=None, user="******"):
    if ip is None:
        ip = self.conf.get("IPADDR", "")
    if netmask is None:
        netmask = self.conf.get("NETMASK", "")
    if gateway is None:
        gateway = self.conf.get("GATEWAY", "")

    self.conf["IPADDR"] = ip
    self.conf["NETMASK"] = netmask
    self.conf["GATEWAY"] = gateway
    self.conf["BOOTPROTO"] = "none"

    # write to config file
    self.conf.apply_to(self.conf_file_path)

    # restart this interface
    if self.ifconfig_interface.is_up():
        check_output([IFDOWN, self.name])
        check_output([IFUP, self.name])

    # log the operation
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Network interface (%s) is configured with (IP:%s, Netmask:%s, "
               "Gateway:%s) by user(%s)" %
               (self.name, ip, netmask, gateway, user))
def update_monitor_conf(self, monitor_name, expression=None, option=None, operator="unknown"):
    with self.lock:
        snmp_conf = self._load_conf()
        monitor_list = snmp_conf["monitor_list"]

        update_monitor_index = 0
        for index, monitor_conf in enumerate(monitor_list):
            if monitor_conf["monitor_name"] == monitor_name:
                update_monitor_index = index
                break
        else:
            raise StorLeverError("Monitor (%s) Not Found" % (monitor_name), 404)

        if expression is not None:
            monitor_conf["expression"] = expression
        if option is not None:
            monitor_conf["option"] = option

        monitor_list[update_monitor_index] = \
            self.monitor_conf_schema.validate(monitor_conf)

        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "SNMP Monitor (%s) config is updated by operator(%s)" %
               (monitor_name, operator))
def set_monitor_list(self, monitor_list=[], operator="unknown"):
    monitor_list = Schema([self.smartd_monitor_conf_schema]).validate(monitor_list)
    for i, monitor_conf in enumerate(monitor_list[:]):
        monitor_list[i] = filter_dict(monitor_conf,
                                      ("dev", "mail_to", "mail_test",
                                       "mail_exec", "schedule_regexp"))

    with self.lock:
        smartd_conf = self._load_conf()
        smartd_conf["monitor_list"] = monitor_list

        # check validation
        for monitor_conf in smartd_conf["monitor_list"]:
            if not os.path.exists(monitor_conf["dev"]):
                raise StorLeverError("Device (%s) not found" % (monitor_conf["dev"]), 404)
            else:
                mode = os.stat(monitor_conf["dev"])[ST_MODE]
                if not S_ISBLK(mode):
                    raise StorLeverError("Device (%s) is not a block device" % (monitor_conf["dev"]), 400)
            if monitor_conf["mail_exec"] != "" and not os.path.exists(monitor_conf["mail_exec"]):
                raise StorLeverError("mail_exec (%s) not found" % (monitor_conf["mail_exec"]), 404)

        # save new conf
        self._save_conf(smartd_conf)
        self._sync_to_system_conf(smartd_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Smartd monitor list is updated by operator(%s)" % (operator))
def append_export_conf(self, name, path="/", clients=[], operator="unknown"):
    if path != "" and not os.path.exists(path):
        raise StorLeverError("path(%s) does not exist" % (path), 400)

    new_export_point = {
        "name": name,
        "path": path,
        "clients": clients,
    }
    new_export_point = self.export_point_conf_schema.validate(new_export_point)

    with self.lock:
        nfs_conf = self._load_conf()

        # check duplication
        for point in nfs_conf["export_point_list"]:
            if path == point["path"]:
                raise StorLeverError("export with path(%s) already in nfs export table" % (path), 400)
            if name == point["name"]:
                raise StorLeverError("export with name(%s) already in nfs export table" % (name), 400)

        nfs_conf["export_point_list"].append(new_export_point)

        # save new conf
        self._save_conf(nfs_conf)
        self._sync_to_system_conf(nfs_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "NFS export with path(%s) config is added by operator(%s)" %
               (path, operator))
def set_export_conf(self, name, path=None, clients=None, operator="unknown"):
    if path is not None and path != "" and not os.path.exists(path):
        raise StorLeverError("path(%s) does not exist" % (path), 400)

    with self.lock:
        nfs_conf = self._load_conf()
        for index, point in enumerate(nfs_conf["export_point_list"]):
            if name == point["name"]:
                break
        else:
            raise StorLeverError("export(%s) not found" % (name), 404)

        if path is not None:
            point["path"] = path
        if clients is not None:
            point["clients"] = clients

        nfs_conf["export_point_list"][index] = self.export_point_conf_schema.validate(point)

        # save new conf
        self._save_conf(nfs_conf)
        self._sync_to_system_conf(nfs_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "NFS export (name:%s) config is updated by operator(%s)" %
               (name, operator))
def set_host_list(self, host_list, user="******"):
    host_list = HOST_LIST_SCHEMA.validate(host_list)

    if os.path.exists(ETC_HOSTS_FILE):
        with open(ETC_HOSTS_FILE, "r") as f:
            lines = f.readlines()
    else:
        lines = []

    if "# begin storlever\n" in lines:
        before_storlever = lines[0:lines.index("# begin storlever\n")]
    else:
        before_storlever = lines[0:]
    if before_storlever and (not before_storlever[-1].endswith("\n")):
        before_storlever[-1] += "\n"

    if "# end storlever\n" in lines:
        after_storlever = lines[lines.index("# end storlever\n") + 1:]
    else:
        after_storlever = []

    with open(ETC_HOSTS_FILE, "w") as f:
        f.writelines(before_storlever)
        f.write("# begin storlever\n")
        for host in host_list:
            f.write("%s %s %s\n" % (host["addr"], host["hostname"], host["alias"]))
        f.write("# end storlever\n")
        f.writelines(after_storlever)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Host list is updated by user(%s)" % user)
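# set_host_list above keeps a managed block between "# begin storlever" and
# "# end storlever" markers and rewrites only that block, leaving the rest of
# /etc/hosts untouched. Below is a minimal standalone sketch of the same splice
# technique; the helper name and the sample entries are illustrative only and
# not part of StorLever.
def splice_managed_block(lines, new_entries,
                         begin="# begin storlever\n", end="# end storlever\n"):
    # split the file into the parts before and after the managed block
    before = lines[:lines.index(begin)] if begin in lines else lines[:]
    after = lines[lines.index(end) + 1:] if end in lines else []
    return before + [begin] + new_entries + [end] + after

if __name__ == "__main__":
    original = ["127.0.0.1 localhost\n", "# begin storlever\n",
                "10.0.0.1 old-host\n", "# end storlever\n"]
    print("".join(splice_managed_block(original, ["10.0.0.2 new-host\n"])))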
def delete(self, md_name):
    """
    Destroy a RAID device.

    WARNING: this will zero the superblock of all members of the RAID array.

    CLI Example:

    .. code-block:: bash

        salt '*' raid.destroy /dev/md0
    """
    md = MD(md_name, self._lock)
    md_device = _md_name_to_dev_file(md_name)
    stop_cmd = '/sbin/mdadm --stop {0}'.format(md_device)
    zero_cmd = '/sbin/mdadm --zero-superblock {0}'
    with self._lock:
        check_output(stop_cmd.split())
        for _, member in md.members.iteritems():
            try:
                check_output(zero_cmd.format(member['device']).split())
            except StorLeverError:
                logger.log(logging.WARNING, logger.LOG_TYPE_ERROR,
                           "Failed to zero superblock of device {0}".format(member['device']),
                           exc_info=True)
        self._update_mdadm_conf()
        self.refresh()
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "MD {0} removed successfully".format(md_device))
def grow_raid(self, device):
    grow_cmd = '/sbin/mdadm --grow {0} --raid-device={1}'.format(self.dev_file, device)
    with self._lock:
        check_output(grow_cmd.split())
        self.refresh()
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "MD {0} grows successfully with block device {1}".format(self.dev_file, device))
def del_incominguser(self, name, operator="unknown"):
    with self.mgr.lock:
        conf = self.mgr._get_target_conf(self.iqn)
        found = None
        for user in conf["incominguser_list"]:
            user_name, sep, password = user.partition(":")
            if user_name == name:
                found = user
        if found is None:
            raise StorLeverError(
                "tgt target (iqn:%s) incominguser (%s) Not Found" % (self.iqn, name),
                404)
        else:
            conf["incominguser_list"].remove(found)

        self.mgr._set_target_conf(self.iqn, conf)
        self.conf = conf     # update the cached target conf
        self._update_target()

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "tgt target (iqn:%s) incominguser (%s) is deleted by operator(%s)" %
               (self.iqn, name, operator))
def update_community_conf(self, community_name, ipv6=None, source=None, oid=None,
                          read_only=None, operator="unknown"):
    with self.lock:
        snmp_conf = self._load_conf()
        community_list = snmp_conf["community_list"]

        update_community_index = 0
        for index, community_conf in enumerate(community_list):
            if community_conf["community_name"] == community_name:
                update_community_index = index
                break
        else:
            raise StorLeverError("Community (%s) Not Found" % (community_name), 404)

        community_conf = community_list[update_community_index]
        if ipv6 is not None:
            community_conf["ipv6"] = ipv6
        if source is not None:
            community_conf["source"] = source
        if oid is not None:
            community_conf["oid"] = oid
        if read_only is not None:
            community_conf["read_only"] = read_only

        community_list[update_community_index] = \
            self.community_conf_schema.validate(community_conf)

        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "SNMP Community (%s) config is updated by operator(%s)" %
               (community_name, operator))
def set_agent_conf(self, config=None, operator="unknown", *args, **kwargs):
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise StorLeverError("Parameter type error", 500)
    if len(config) == 0 and len(kwargs) == 0:
        return
    config = dict(config)   # work on a copy so the caller's dict is not modified
    config.update(kwargs)

    not_allow_keys = (
        "active_check_server_list",
        "passive_check_server_list"
    )
    config = filter_dict(config, not_allow_keys, True)

    with self.lock:
        zabbix_agent_conf = self._load_conf()
        for name, value in config.items():
            if name in zabbix_agent_conf and value is not None:
                zabbix_agent_conf[name] = value

        # check config conflict
        zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf)

        # save new conf
        self._save_conf(zabbix_agent_conf)
        self._sync_to_system_conf(zabbix_agent_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Zabbix agent config is updated by operator(%s)" % (operator))
def mod_dir_owner(self, relative_path, user=None, group=None, operator="unknown"):
    if not self.is_available():
        raise StorLeverError("File system is unavailable", 500)
    if "." in relative_path or ".." in relative_path:
        raise StorLeverError("name cannot include . or ..", 400)
    if relative_path.startswith("/"):
        raise StorLeverError("name must be a relative path name", 400)

    path = os.path.join(self.fs_conf["mount_point"], relative_path)
    if not os.path.exists(path):
        raise StorLeverError("Share directory not found", 404)

    umgr = user_mgr()
    if user is None:
        uid = -1
    else:
        uid = umgr.get_user_info_by_name(user)["uid"]
    if group is None:
        gid = -1
    else:
        gid = umgr.get_group_by_name(group)["gid"]

    os.chown(path, uid, gid)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Share directory (%s) owner is changed to (%s:%s)"
               " by user(%s)" % (path, user, group, operator))
def quota_group_set(self, group, block_softlimit=0, block_hardlimit=0,
                    inode_softlimit=0, inode_hardlimit=0, operator="unknown"):
    if not self.is_available():
        raise StorLeverError("File system is unavailable", 500)

    setquota_args = [
        SETQUOTA_BIN, "-g", group,
        str(block_softlimit), str(block_hardlimit),
        str(inode_softlimit), str(inode_hardlimit),
        self.fs_conf["mount_point"]
    ]
    check_output(setquota_args)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "File System(%s) quota for group(%s) is changed to "
               "(%d,%d,%d,%d) by user(%s)" %
               (self.name, group, block_softlimit, block_hardlimit,
                inode_softlimit, inode_hardlimit, operator))
def set_hostname(self, hostname, user="******"):
    # get old hostname
    old_hostname = self.get_hostname()

    # change hostname in system
    check_output(["/bin/hostname", hostname])

    # change hostname in /etc/sysconfig/network
    network_properties = properties(HOSTNAME=hostname)
    network_properties.apply_to(ETC_NETWORK_FILE)

    # add ip for this hostname
    host_list = self.get_host_list()
    exist = False
    for host in host_list:
        if host["hostname"] == old_hostname:
            host["hostname"] = hostname
            exist = True
    if not exist:
        # ipv4
        host_list.append({
            "addr": "127.0.0.1",
            "hostname": hostname,
            "alias": ""
        })
        # ipv6
        host_list.append({
            "addr": "::1",
            "hostname": hostname,
            "alias": ""
        })
    self.set_host_list(host_list, user=user)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "hostname is changed to %s by user(%s)" % (hostname, user))
def del_smb_account(self, username, operator="unknown"):
    check_output([PDBEDIT_CMD, '-x', '-u', username])
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Samba user (%s) is deleted from password DB by operator(%s)" %
               (username, operator))
def create_target(self, iqn, operator="unknown"):
    new_target_conf = {
        "iqn": iqn,
        "initiator_addr_list": [],
        "initiator_name_list": [],
        "incominguser_list": [],
        "outgoinguser_list": [],
    }
    new_target_conf = self.target_conf_schema.validate(new_target_conf)

    with self.lock:
        tgt_conf = self._load_conf()

        # check duplicate
        for target_conf in tgt_conf["target_list"]:
            if target_conf["iqn"] == iqn:
                raise StorLeverError("Target (iqn:%s) already exists" % iqn, 400)

        tgt_conf["target_list"].append(new_target_conf)

        # save new conf
        self._save_conf(tgt_conf)
        self._sync_to_system_conf(tgt_conf)

        try:
            check_output([TGTADMIN_CMD, "--execute"])
        except StorLeverError:
            pass

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "tgt target (iqn:%s) config is added by operator(%s)" %
               (iqn, operator))
def set_smb_conf(self, config=None, operator="unknown", **kwargs):
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise StorLeverError("Parameter type error", 500)
    if len(config) == 0 and len(kwargs) == 0:
        return
    config = dict(config)   # work on a copy so the caller's dict is not modified
    config.update(kwargs)

    not_allowed_keys = ("share_list", )
    config = filter_dict(config, not_allowed_keys, True)

    if "guest_account" in config and config["guest_account"] is not None:
        try:
            user_mgr().get_user_info_by_name(config["guest_account"])
        except Exception:
            raise StorLeverError("guest_account does not exist", 400)

    with self.lock:
        smb_conf = self._load_conf()
        for name, value in config.items():
            if name == "share_list":
                continue
            if name in smb_conf and value is not None:
                smb_conf[name] = value

        # check config conflict
        smb_conf = self.smb_conf_schema.validate(smb_conf)

        # save new conf
        self._save_conf(smb_conf)
        self._sync_to_system_conf(smb_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Samba config is updated by user(%s)" % (operator))
def set_mail_conf(self, config=None, operator="unknown", *args, **kwargs):
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise StorLeverError("Parameter type error", 500)
    if len(config) == 0 and len(kwargs) == 0:
        return
    config = dict(config)   # work on a copy so the caller's dict is not modified
    config.update(kwargs)

    with self.lock:
        mail_conf = self._load_conf()
        for name, value in config.items():
            if name in mail_conf and value is not None:
                mail_conf[name] = value

        # check config conflict
        mail_conf = self.mail_conf_schema.validate(mail_conf)
        if mail_conf["smtp_server"] != "":
            if mail_conf["email_addr"] == "":
                raise StorLeverError("email_addr cannot be empty if smtp_server exists", 400)
            if mail_conf["password"] == "":
                raise StorLeverError("password cannot be empty if smtp_server exists", 400)

        # save new conf
        self._save_conf(mail_conf)
        self._sync_to_system_conf(mail_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Mail config is updated by operator(%s)" % (operator))
def disable_auto_start(self, user="******"):
    check_output([CHKCONFIG, "--level", SET_CHK_LEVEL, self.init_script, "off"])
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Service %s auto start is disabled by user(%s)" % (self.name, user))
def set_basic_conf(self, config=None, operator="unknown", **kwargs):
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise StorLeverError("Parameter type error", 500)
    if len(config) == 0 and len(kwargs) == 0:
        return
    config = dict(config)   # work on a copy so the caller's dict is not modified
    config.update(kwargs)

    not_allowed_keys = (
        "community_list",
        "trapsink_list",
        "monitor_list"
    )
    config = filter_dict(config, not_allowed_keys, True)

    with self.lock:
        snmp_conf = self._load_conf()
        for name, value in config.items():
            if name in snmp_conf and value is not None:
                snmp_conf[name] = value

        # check config conflict
        snmp_conf = self.snmp_conf_schema.validate(snmp_conf)

        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "SNMP basic config is updated by operator(%s)" % (operator))
def add_community_conf(self, community_name, ipv6=False, source="", oid="",
                       read_only=True, operator="unknown"):
    new_community_conf = {
        "community_name": community_name,
        "ipv6": ipv6,
        "source": source,
        "oid": oid,
        "read_only": read_only,
    }
    new_community_conf = self.community_conf_schema.validate(new_community_conf)

    with self.lock:
        snmp_conf = self._load_conf()
        community_list = snmp_conf["community_list"]

        # check duplicate
        for community_conf in community_list:
            if community_conf["community_name"] == community_name:
                raise StorLeverError("Community (%s) already exists" % community_name, 400)

        community_list.append(new_community_conf)

        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "SNMP New Community (%s) is added by operator(%s)" %
               (community_name, operator))
def add_component(self, device):
    add_cmd = '/sbin/mdadm {0} --add {1}'.format(self.dev_file, device)
    with self._lock:
        check_output(add_cmd.split())
        self.refresh()
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Block device {0} added to MD {1} successfully".format(device, self.dev_file))
def del_fs(self, fs_name, user="******"):
    """Delete a filesystem from storlever

    The filesystem would be removed from storlever's config file and
    unmounted from the Linux system.
    """
    with self.lock:
        fs_dict = self._load_conf()
        if fs_name not in fs_dict:
            raise StorLeverError("filesystem(%s) does not exist" % fs_name, 400)

        fs_conf = fs_dict[fs_name]
        del fs_dict[fs_name]

        # umount fs first. if it failed, don't delete it in the config
        self._umount_fs(fs_name, fs_conf)

        self._save_conf(fs_dict)
        self._sync_to_fstab(fs_dict)

        try:
            os.rmdir(fs_conf["mount_point"])
        except OSError:
            pass

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "filesystem %s (dev:%s, mount_point:%s, option:%s) "
               "is deleted by user(%s)" %
               (fs_name, fs_conf['dev_file'], fs_conf['mount_point'],
                fs_conf['mount_option'], user))
def logout(self, operator="unknown"):
    cmd = [ISCSIADM_CMD, "-m", "node", "--logout",
           "-T", self.target, "-p", self.portal]
    check_output(cmd, input_ret=[2, 6, 7, 21, 22])
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "iscsi initiator node (%s, %s) is logged out by operator(%s)" %
               (self.target, self.portal, operator))
def add_fs(self, fs_name, type, dev_file, mount_option="", check_onboot=False,
           comment="", user="******"):
    """Add a filesystem with the given properties to storlever

    The new filesystem would be mounted on the specific directory
    (/mnt/FS_NAME) and added to storlever's fs config.
    """
    # check type
    if type not in self.support_fs_type:
        raise StorLeverError("type(%s) is not supported" % type, 400)

    # check mount point
    mount_point = os.path.join(MOUNT_DIR, fs_name)
    if os.path.exists(mount_point):
        if not os.path.isdir(mount_point):
            raise StorLeverError("mount point(%s) already exists and is not a directory" % mount_point)
    else:
        # create mount point
        os.makedirs(mount_point)

    # don't check dev file exist, because for the network fs, the dev file is a network id
    # if not os.path.exists(dev_file):
    #     raise StorLeverError("dev file(%s) does not exist" % dev_file, 400)

    dev_uuid = ""
    if (not dev_file.startswith("/dev/mapper")) and os.path.exists(dev_file):
        dev_uuid = self._dev_file_to_uuid(dev_file)

    fs_conf = {
        "type": type,
        "dev_file": dev_file,
        "dev_uuid": dev_uuid,
        "mount_point": mount_point,
        "mount_option": mount_option,
        "check_onboot": check_onboot,
        "comment": comment
    }
    fs_conf = self.fs_conf_schema.validate(fs_conf)

    with self.lock:
        fs_dict = self._load_conf()
        if fs_name in fs_dict:
            raise StorLeverError("filesystem(%s) already exists" % fs_name, 400)

        # mount fs first
        self._mount_fs(fs_name, fs_conf)

        fs_dict[fs_name] = fs_conf
        self._save_conf(fs_dict)
        self._sync_to_fstab(fs_dict)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "New filesystem %s (dev:%s, mount_point:%s, option:%s) "
               "is added by user(%s)" %
               (fs_name, dev_file, mount_point, mount_option, user))
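# A hypothetical usage sketch for add_fs/del_fs above. "mgr" stands for whatever
# filesystem-manager instance the module's accessor returns; the device path,
# fs name and user are made-up values for illustration, not verified StorLever
# defaults.
def example_mount_data_fs(mgr):
    # mount /dev/sdb1 as an ext4 filesystem under /mnt/data and register it
    mgr.add_fs("data", "ext4", "/dev/sdb1",
               mount_option="defaults", check_onboot=True,
               comment="example volume", user="admin")
    # ... later, unregister it again, which also unmounts it
    mgr.del_fs("data", user="admin")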
def set_share_conf(self, share_name, path=None, comment=None,
                   create_mask=None, directory_mask=None,
                   guest_ok=None, read_only=None, browseable=None,
                   force_create_mode=None, force_directory_mode=None,
                   valid_users=None, write_list=None, veto_files=None,
                   operator="unknown"):
    if path is not None and path != "" and not os.path.exists(path):
        raise StorLeverError("path(%s) does not exist" % (path), 400)

    with self.lock:
        smb_conf = self._load_conf()
        share_conf = smb_conf["share_list"].get(share_name)
        if share_conf is None:
            raise StorLeverError("share_conf(%s) not found" % (share_name), 404)

        if path is not None:
            share_conf["path"] = path
        if comment is not None:
            share_conf["comment"] = comment
        if create_mask is not None:
            share_conf["create_mask"] = create_mask
        if directory_mask is not None:
            share_conf["directory_mask"] = directory_mask
        if guest_ok is not None:
            share_conf["guest_ok"] = guest_ok
        if read_only is not None:
            share_conf["read_only"] = read_only
        if browseable is not None:
            share_conf["browseable"] = browseable
        if force_create_mode is not None:
            share_conf["force_create_mode"] = force_create_mode
        if force_directory_mode is not None:
            share_conf["force_directory_mode"] = force_directory_mode
        if valid_users is not None:
            share_conf["valid_users"] = valid_users
        if write_list is not None:
            share_conf["write_list"] = write_list
        if veto_files is not None:
            share_conf["veto_files"] = veto_files

        # save new conf
        self._save_conf(smb_conf)
        self._sync_to_system_conf(smb_conf)

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Samba share (%s) config is updated by operator(%s)" %
               (share_name, operator))
def add_lun(self, lun, path, device_type="disk", bs_type="rdwr", direct_map=False,
            write_cache=True, readonly=False, online=True, scsi_id="", scsi_sn="",
            operator="unknown"):
    if path != "" and not os.path.exists(path):
        raise StorLeverError("path(%s) does not exist" % (path), 400)

    lun_conf = {
        "lun": lun,
        "path": path,
        "device_type": device_type,
        "bs_type": bs_type,
        "direct_map": direct_map,
        "write_cache": write_cache,
        "readonly": readonly,
        "online": online,
        "scsi_id": scsi_id,
        "scsi_sn": scsi_sn
    }
    lun_conf = self.mgr.lun_conf_schema.validate(lun_conf)

    # check conflict
    if device_type == "pt":
        if bs_type != "sg":
            raise StorLeverError("pt device's bs_type must be sg", 400)
        if not path.startswith("/dev/sg"):
            raise StorLeverError("pt device's path must be /dev/sg*", 400)
    elif device_type in ("tape", "ssc"):
        if bs_type != "ssc":
            raise StorLeverError("ssc device's bs_type must be ssc", 400)
    else:
        if bs_type in ("sg", "ssc"):
            raise StorLeverError("bs_type cannot be ssc/sg", 400)

    if direct_map:
        mode = os.stat(path)[ST_MODE]
        if not (S_ISBLK(mode) or S_ISCHR(mode)):
            raise StorLeverError("path must be a device file if direct_map is true", 400)

    with self.mgr.lock:
        conf = self.mgr._get_target_conf(self.iqn)
        found = None
        for l in conf["lun_list"]:
            if l["lun"] == lun:
                found = l
        if found is not None:
            raise StorLeverError("tgt target (iqn:%s) Lun (%d) already exists" % (self.iqn, lun), 400)
        else:
            conf["lun_list"].append(lun_conf)

        self.mgr._set_target_conf(self.iqn, conf)
        self.conf = conf     # update the cached target conf
        self._update_target()

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "tgt target (iqn:%s) Lun (%d) is added by operator(%s)" %
               (self.iqn, lun, operator))
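# The device_type/bs_type pairing rules enforced by add_lun above, restated as a
# small standalone helper. This is only an illustrative sketch of that validation
# logic; the function name and boolean return convention are not part of
# StorLever.
def check_lun_backing(device_type, bs_type, path):
    if device_type == "pt":
        # SCSI pass-through must use the sg backing store and an sg device node
        return bs_type == "sg" and path.startswith("/dev/sg")
    if device_type in ("tape", "ssc"):
        # tape devices must use the ssc backing store
        return bs_type == "ssc"
    # every other device type must not use the sg/ssc backing stores
    return bs_type not in ("sg", "ssc")

if __name__ == "__main__":
    assert check_lun_backing("pt", "sg", "/dev/sg3")
    assert not check_lun_backing("disk", "ssc", "/dev/sdb")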
def group_del_by_name(self, name, user="******"):
    if name == "root":
        raise StorLeverError("cannot delete group root", 400)
    cmds = ["/usr/sbin/groupdel"]
    cmds.append(name)
    check_output(cmds, input_ret=[2, 6, 8])
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "System group %s is deleted by user(%s)" % (name, user))
def logout_session(self, session_id, operator="unknown"):
    check_output([ISCSIADM_CMD, "-m", "session", "-u", "-r", str(session_id)],
                 input_ret=[2, 6, 7, 21, 22])
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "iscsi initiator session (%s) is logged out by operator(%s)" %
               (str(session_id), operator))
def remove_component(self, device):
    fail_cmd = '/sbin/mdadm {0} --fail {1}'.format(self.dev_file, device)
    remove_cmd = '/sbin/mdadm {0} --remove {1}'.format(self.dev_file, device)
    with self._lock:
        check_output(fail_cmd.split())
        check_output(remove_cmd.split())
        self.refresh()
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Block device {0} detached from MD {1} successfully".format(device, self.dev_file))
def up(self, user="******"):
    self.conf["ONBOOT"] = "yes"
    self.save_conf()
    check_output([IFUP, self.name])
    # log the operation
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Network interface (%s) is up by user(%s)" % (self.name, user))
def down(self, user="******"):
    self.conf["ONBOOT"] = "no"
    self.save_conf()
    check_output([IFDOWN, self.name])
    # log the operation
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Network interface (%s) is down by user(%s)" % (self.name, user))
def group_add(self, name, gid=None, user="******"):
    cmds = ["/usr/sbin/groupadd"]
    if gid is not None:
        cmds.append("-g")
        cmds.append("%d" % int(gid))
    cmds.append(name)
    check_output(cmds, input_ret=[2, 3, 4, 9])
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "New system group %s is created by user(%s)" % (name, user))
def system_restore(self, user="******"):
    # call the register callback function for system_restore
    for callback in self.system_restore_cb:
        callback()

    self._clear_conf_dir()

    # invoke the other module's interface to restore
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "Storlever system is totally restored by user(%s)" % user)
def create_iface(self, iface_name, operator="unknown"):
    iface_list = self.get_iface_list()
    for iface in iface_list:
        if iface.iscsi_ifacename == iface_name:
            raise StorLeverError("iface (%s) already exists" % iface_name, 400)

    check_output([ISCSIADM_CMD, "-m", "iface", "-I", iface_name, "-o", "new"],
                 input_ret=[2, 6, 7, 21, 22])

    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "iscsi initiator iface (%s) is created by operator(%s)" %
               (iface_name, operator))
def set_conf(self, name, value, operator="unknown"):
    name = str(name).strip()
    value = str(value).strip()
    check_output([ISCSIADM_CMD, "-m", "iface", "-I", self.iscsi_ifacename,
                  "-o", "update", "-n", name, "-v", value],
                 input_ret=[2, 6, 7, 21, 22])
    self._refresh_property()
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "iscsi initiator iface (%s) conf (%s:%s) is updated by operator(%s)" %
               (self.iscsi_ifacename, name, value, operator))