def test_donot_care(self):
    """A DoNotCare key lets arbitrary extra entries pass through unchanged."""
    schema = Schema({'key': str, DoNotCare(str): object})
    payload = {'key': 'abc', 'key2': 'bbb', 'key3': [1, 2, 3]}
    self.assertEqual(payload, schema.validate(payload))
    # the mandatory 'key' entry is still required
    with self.assertRaises(SchemaError):
        schema.validate({'key2': 'bbb', 'key3': [1, 2, 3]})
def test_list(self):
    """A list schema validates every member against the element schema."""
    str_schema = Schema([str])
    strings = ['abc', 'bbc', 'ddc']
    self.assertEqual(str_schema.validate(strings), strings)
    with self.assertRaises(SchemaError):
        str_schema.validate(['abc', 123, 'bbc'])  # non-string member
    ranged = Schema([IntVal(min=10, max=20)])
    self.assertEqual(ranged.validate([10, 12, 19, 11]), [10, 12, 19, 11])
    with self.assertRaises(SchemaError):
        ranged.validate([10, 12, 21])  # 21 is above the max
def test_default_value(self):
    """Default() supplies a value for an absent optional key."""
    schema = Schema({
        "key": str,
        Optional('op_key'): Default(IntVal(min=10), default=50),
    })
    # absent -> default applied
    self.assertEqual({'key': 'abc', 'op_key': 50},
                     schema.validate({'key': 'abc'}))
    # present and valid -> kept as-is
    self.assertEqual({'key': 'abc', 'op_key': 20},
                     schema.validate({'key': 'abc', 'op_key': 20}))
    # present but below the minimum -> rejected
    with self.assertRaises(SchemaError):
        schema.validate({'key': 'abc', 'op_key': 0})
def test_optional_value(self):
    """Optional keys may be omitted but are validated when present."""
    schema = Schema({"key": str, Optional("op_key"): IntVal()})
    self.assertEqual({"key": "abc"}, schema.validate({"key": "abc"}))
    validated = schema.validate({'key': 'abc', 'op_key': 123})
    self.assertEqual({'key': 'abc', 'op_key': 123}, validated)
    with self.assertRaises(SchemaError):
        schema.validate({'key': 'abc', 'op_key': 'bcd'})  # not an int
def test_donot_care(self):
    """Unknown keys matched by DoNotCare survive validation untouched."""
    schema = Schema({'key': str, DoNotCare(str): object})
    sample = {'key': 'abc', 'key2': 'bbb', 'key3': [1, 2, 3]}
    result = schema.validate(sample)
    self.assertEqual(sample, result)
    with self.assertRaises(SchemaError):
        # missing the mandatory 'key'
        schema.validate({'key2': 'bbb', 'key3': [1, 2, 3]})
def test_strre_value(self):
    """StrRe validates strings against an anchored regular expression."""
    schema = Schema(StrRe("^(abc|efg)$"))
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual("abc", schema.validate("abc"))
    self.assertEqual("efg", schema.validate("efg"))
    with self.assertRaises(SchemaError):
        schema.validate("ebc")       # no match
    with self.assertRaises(SchemaError):
        schema.validate("abcdefg")   # anchored pattern rejects extra text
    with self.assertRaises(SchemaError):
        schema.validate(0)           # non-string input
    with self.assertRaises(SchemaError):
        schema.validate(1)
def test_autodel(self):
    """AutoDel drops unknown string keys instead of rejecting them."""
    with_autodel = Schema({'key': str, AutoDel(str): object})
    without_autodel = Schema({'key': str})
    extra = {'key': 'abc', 'key2': 'bbb', 'key3': [1, 2, 3]}
    # the extra keys are removed from the result
    self.assertEqual({'key': 'abc'}, with_autodel.validate(extra))
    # without AutoDel the same input is rejected
    with self.assertRaises(SchemaError):
        without_autodel.validate({'key': 'abc', 'key2': 'bbb',
                                  'key3': [1, 2, 3]})
    # mandatory 'key' missing -> still an error
    with self.assertRaises(SchemaError):
        with_autodel.validate({'key2': 'bbb', 1: [1, 2, 3]})
def set_restrict_list(self, restrict_list=None, operator="unknown"):
    """Replace the NTP restrict list in the conf and sync to the system.

    :param restrict_list: list of restrict entries, validated against
        ``self.ntp_restrict_conf_schema``; defaults to an empty list
    :param operator: operator name/address recorded in the audit log
    """
    # fix: avoid a shared mutable default argument ([])
    if restrict_list is None:
        restrict_list = []
    restrict_list = \
        Schema([self.ntp_restrict_conf_schema]).validate(restrict_list)
    with self.lock:
        ntp_conf = self._load_conf()
        ntp_conf["restrict_list"] = restrict_list
        # save new conf
        self._save_conf(ntp_conf)
        self._sync_to_system_conf(ntp_conf)
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "NTP Restrict config list is updated by operator(%s)" % operator)
def set_trap_sink_list(self, trap_sink_list=None, operator="unknown"):
    """Replace the SNMP trap sink list in the conf and sync to the system.

    :param trap_sink_list: list of sink entries, validated against
        ``self.sink_conf_schema``; defaults to an empty list
    :param operator: operator name/address recorded in the audit log
    """
    # fix: avoid a shared mutable default argument ([])
    if trap_sink_list is None:
        trap_sink_list = []
    trap_sink_list = Schema([self.sink_conf_schema]).validate(trap_sink_list)
    with self.lock:
        snmp_conf = self._load_conf()
        snmp_conf["trapsink_list"] = trap_sink_list
        # save new conf
        self._save_conf(snmp_conf)
        self._sync_to_system_conf(snmp_conf)
    logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
               "SNMP trap sink list is updated by operator(%s)" % operator)
def __init__(self):
    # mutex protecting concurrent filesystem config updates
    # (original comment mentioned "bond interface" — copy-paste leftover)
    self.lock = lock()
    self.support_fs_type = {}
    self.conf_file = os.path.join(STORLEVER_CONF_DIR, FS_CONF_FILE_NAME)
    # schema of a single filesystem conf entry
    self.fs_conf_schema = Schema({
        "type": Use(str),           # filesystem type
        "dev_file": Use(str),       # dev file
        "dev_uuid": Use(str),       # dev uuid
        "mount_point": Use(str),    # mount point of this fs,
        "mount_option": Use(str),   # mount option of this fs
        "check_onboot": BoolVal(),  # fsck fs on boot
        Optional("comment"): Default(Use(str), default=""),  # comment,
        AutoDel(str): object  # for all other key we auto delete
    })
    # top-level conf: a dict mapping fs name -> fs conf entry
    self.fs_dict_schema = Schema({
        DoNotCare(str): self.fs_conf_schema
    })
    # sync fs conf to fstab on boot
    self.sync_to_fstab()
class WebConfig(Config):
    """Configuration of the web management console.

    default password is 123456,
    fc91f9f874d2ef4d48fdde151271716f268977c1f77241d5321b61fda137ac3c
    is sha256 hash of result of PBKDF2 to 123456
    """
    CONF_FILE = os.path.join(STORLEVER_CONF_DIR, 'web.yaml')
    DEFAULT_CONF = {
        'password': '******',
        'language': 'chinese'
    }
    SCHEMA = Schema({
        'password': Use(str),  # hashed web login password
        'language': Use(str),  # UI language
    })

    def __init__(self, conf=None):
        self.conf_file = self.CONF_FILE
        self.conf = conf
        self.schema = self.SCHEMA

    def parse(self):
        """Load, validate and return the conf.

        Writes DEFAULT_CONF to disk when the conf file does not exist yet.
        Raises ConfigError on any other failure.
        """
        if self.conf_file is not None and self.conf_file != "":
            try:
                with open(self.conf_file, "r") as f:
                    # safe_load: yaml.load without an explicit Loader is
                    # rejected by PyYAML >= 6 and can construct arbitrary
                    # objects on older versions
                    self.conf = yaml.safe_load(f)
                if self.schema:
                    self.conf = self.schema.validate(self.conf)
                return self.conf
            except IOError as e:
                if e.errno == errno.ENOENT:
                    # first run: create the file with the defaults
                    self.conf = self.DEFAULT_CONF
                    self.write()
                    return self.conf
                else:
                    raise ConfigError(str(e))
            except Exception as e:
                # bug fix: original raised str(Exception) — the class repr —
                # instead of the actual error message
                raise ConfigError(str(e))
        else:
            raise ConfigError("conf file absent")

    @classmethod
    def from_file(cls):
        """Alternate constructor: build a WebConfig from the conf file."""
        conf = cls()
        conf.parse()
        return conf

    @classmethod
    def to_file(cls, conf):
        """Persist the given conf dict to the conf file."""
        conf = cls(conf=conf)
        conf.write()
        return conf
def test_optional_value(self):
    """An Optional key is validated only when it is supplied."""
    schema = Schema({"key": str, Optional("op_key"): IntVal()})
    # omitted optional key -> not present in the output
    self.assertEqual({"key": "abc"}, schema.validate({"key": "abc"}))
    # supplied and valid -> passed through
    self.assertEqual({'key': 'abc', 'op_key': 123},
                     schema.validate({'key': 'abc', 'op_key': 123}))
    # supplied but invalid -> error
    with self.assertRaises(SchemaError):
        schema.validate({'key': 'abc', 'op_key': 'bcd'})
def test_autodel(self):
    """AutoDel strips unmatched string keys from the validated result."""
    schema = Schema({'key': str, AutoDel(str): object})
    schema2 = Schema({'key': str})
    payload = {'key': 'abc', 'key2': 'bbb', 'key3': [1, 2, 3]}
    # only the declared key survives
    self.assertEqual({'key': 'abc'}, schema.validate(payload))
    with self.assertRaises(SchemaError):
        # strict schema rejects the extra keys
        schema2.validate({'key': 'abc', 'key2': 'bbb', 'key3': [1, 2, 3]})
    with self.assertRaises(SchemaError):
        # mandatory 'key' absent
        schema.validate({'key2': 'bbb', 1: [1, 2, 3]})
def set_monitor_list(self, monitor_list=None, operator="unknown"):
    """Replace the smartd monitor list and sync it to the system conf.

    :param monitor_list: list of monitor entries, validated against
        ``self.smartd_monitor_conf_schema``; defaults to an empty list
    :param operator: operator name/address recorded in the audit log
        (fix: default was misspelled "unkown")
    :raises StorLeverError: when a monitored device is missing, is not a
        block device, or a configured mail_exec script does not exist
    """
    # fix: avoid a shared mutable default argument ([])
    if monitor_list is None:
        monitor_list = []
    monitor_list = \
        Schema([self.smartd_monitor_conf_schema]).validate(monitor_list)
    # keep only the keys smartd cares about
    for i, monitor_conf in enumerate(monitor_list[:]):
        monitor_list[i] = filter_dict(monitor_conf,
                                      ("dev", "mail_to", "mail_test",
                                       "mail_exec", "schedule_regexp"))
    with self.lock:
        smartd_conf = self._load_conf()
        smartd_conf["monitor_list"] = monitor_list
        # check validation
        for monitor_conf in smartd_conf["monitor_list"]:
            if not os.path.exists(monitor_conf["dev"]):
                raise StorLeverError(
                    "Device (%s) not found" % (monitor_conf["dev"]), 404)
            else:
                mode = os.stat(monitor_conf["dev"])[ST_MODE]
                if not S_ISBLK(mode):
                    raise StorLeverError(
                        "Device (%s) not block device"
                        % (monitor_conf["dev"]), 400)
            if monitor_conf["mail_exec"] != "" and \
                    not os.path.exists(monitor_conf["mail_exec"]):
                raise StorLeverError(
                    "mail_exec (%s) not found"
                    % (monitor_conf["mail_exec"]), 404)
        # save new conf
        self._save_conf(smartd_conf)
        self._sync_to_system_conf(smartd_conf)
    logger.log(
        logging.INFO, logger.LOG_TYPE_CONFIG,
        "Smartd monitor list is updated by operator(%s)" % (operator))
                 '/san/tgt/target_list/{target_iqn}/lun_list/{lun_number}')
# NOTE(review): the line above is the tail of a config.add_route(...) call
# that starts before this chunk.


@get_view(route_name='tgt_conf')
def get_tgt_conf(request):
    """Return the global iscsi target daemon configuration."""
    tgt_mgr = tgtmgr.TgtManager
    return tgt_mgr.get_tgt_conf()


tgt_conf_schema = Schema({
    # Define iscsi incoming discovery authentication setting. If it is
    # empty, no authentication is performed. The format is username:passwd
    Optional("incomingdiscoveryuser"): StrRe(r"^(|\w+:\w+)$"),
    # Define iscsi outgoing discovery authentication setting. If it is
    # empty, no authentication is performed. The format is username:passwd
    Optional("outgoingdiscoveryuser"): StrRe(r"^(|\w+:\w+)$"),
    DoNotCare(Use(str)): object  # for all other key we don't care
})


@put_view(route_name='tgt_conf')
def put_tgt_conf(request):
    """Update the global iscsi target daemon configuration."""
    tgt_mgr = tgtmgr.TgtManager
    tgt_conf = get_params_from_request(request, tgt_conf_schema)
    tgt_mgr.set_tgt_conf(tgt_conf, operator=request.client_addr)
    return Response(status=200)
"Provides the fundamental functions of the low-lever system, " "like date, time, log, shutdown, reboot, and etc" } LOG_DIR = "/var/log" LOG_FILE_PATH_PREFIX = "/tmp/syslog" SELINUX_CONF_DIR = "/etc/selinux/" SELINUX_CONF_FILE = "config" ETC_HOSTS_FILE = "/etc/hosts" ETC_NETWORK_FILE = "/etc/sysconfig/network" HOST_LIST_SCHEMA = Schema([{ "addr": Use(str), "hostname": Use(str), Optional("alias"): Default(Use(str), default=""), AutoDel(str): object # for all other key we auto delete }]) class SysManager(object): """contains all methods to manage the system""" def __init__(self): self.dist_name = None self.dist_version = None self.dist_id = None def system_restore_cb(self): self.set_hostname("localhost", "System Restore") def get_hostname(self):
#http://192.168.1.10:6543/storlever/api/v1/block/md_list/name @get_view(route_name='md') def get_md_rest(request): md_mgr = md.md_mgr() mds = md_mgr.get_all_md() name = request.matchdict['md_name'] md_inf = mds.get_md(name) return md_inf add_md_schema = Schema({ "name": StrRe(r"^(.+)$"), "level": Or(1, 0, 5, 10, 6), "dev": Default(ListVal(StrRe(r"^(/dev/sd[a-z]|/dev/xvd.+)$")), default=[]), DoNotCare(Use(str)): object # for all those key we don't care }) #curl -v -X POST -d name=test -d dev=/dev/sdb,/dev/sdc -d level=1 http://192.168.1.2:6543/storlever/api/v1/block/md_list @post_view(route_name='md_list') def add_md_rest(request): md_mgr = md.md_mgr() mds = md_mgr.get_all_md() params = get_params_from_request(request, add_md_schema) mds.create(params['name'], params['level'], params['dev']) return Response(status=200)
def test_bool_value(self):
    """BoolVal accepts real booleans and 'True'/'False' strings only."""
    schema = Schema(BoolVal())
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(True, schema.validate(True))
    self.assertEqual(True, schema.validate("True"))
    self.assertEqual(True, schema.validate("true"))
    self.assertEqual(False, schema.validate(False))
    self.assertEqual(False, schema.validate("False"))
    self.assertEqual(False, schema.validate("false"))
    with self.assertRaises(SchemaError):
        schema.validate(0)       # ints are not booleans here
    with self.assertRaises(SchemaError):
        schema.validate(1)
    with self.assertRaises(SchemaError):
        schema.validate("abc")   # arbitrary strings rejected
def test_int_value(self):
    """IntVal validates ints (and int strings) against values/min/max."""
    schema = Schema(IntVal())
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    # (also makes this method internally consistent)
    self.assertEqual(10, schema.validate(10))
    self.assertEqual(10, schema.validate('10'))
    with self.assertRaises(SchemaError):
        schema.validate('abc')
    # explicit value set only
    schema = Schema(IntVal(values=(0, 1)))
    self.assertEqual(1, schema.validate(1))
    self.assertEqual(1, schema.validate("1"))
    with self.assertRaises(SchemaError):
        schema.validate(2)
    with self.assertRaises(SchemaError):
        schema.validate("2")
    # min/max range only
    schema = Schema(IntVal(min=10, max=100))
    self.assertEqual(10, schema.validate(10))
    self.assertEqual(100, schema.validate('100'))
    self.assertEqual(50, schema.validate(50))
    with self.assertRaises(SchemaError):
        schema.validate(200)
    with self.assertRaises(SchemaError):
        schema.validate(0)
    # range plus extra allowed values
    schema = Schema(IntVal(min=10, max=100, values=(0, 1)))
    self.assertEqual(0, schema.validate(0))
    self.assertEqual(100, schema.validate(100))
    self.assertEqual(50, schema.validate('50'))
    with self.assertRaises(SchemaError):
        schema.validate(2)
    with self.assertRaises(SchemaError):
        schema.validate(200)
    # open-ended max
    schema = Schema(IntVal(min=10, values=(0, 1)))
    self.assertEqual(200, schema.validate(200))
    self.assertEqual(1, schema.validate(1))
    with self.assertRaises(SchemaError):
        schema.validate(3)
"vsftpd" ], "comment": "Provides the management functions for FTP server" } FTP_CONF_FILE_NAME = "ftp_conf.yaml" VSFTPD_ETC_CONF_DIR = "/etc/vsftpd/" VSFTPD_ETC_CONF_FILE = "vsftpd.conf" VSFTPD_ETC_USER_LIST = "user_list" VSFTPD_ETC_CHROOT_LIST = "chroot_list" FTP_USER_CONF_SCHEMA = Schema({ "user_name": Use(str), # When enabled, the user can log in ftp Optional("login_enable"): Default(BoolVal(), default=True), # When enabled, the user will be placed into the chroot jail Optional("chroot_enable"): Default(BoolVal(), default=False), AutoDel(str): object # for all other key we auto delete }) FTP_CONF_SCHEMA = Schema({ Optional("listen"): Default(BoolVal(), default=False), # ftp service listen on ipv4 port Optional("listen6"): Default(BoolVal(), default=False), # ftp service listen on ipv6 port Optional("listen_port"): Default(IntVal(min=1, max=65535), default=21), # ftp port number # The maximum amount of time between commands from a remote client. # Once triggered, the connection to the remote client is closed Optional("idle_session_timeout"): Default(Use(int), default=300), # the maximum data transfer rate for anonymous users in bytes per second.
"comment": "Provides the support to send email by mailx utility" } MAIL_CONF_FILE_NAME = "mail_conf.yaml" MAIL_ETC_CONF_DIR = "/etc/" MAIL_ETC_CONF_FILE = "mail.rc" MAIL_CMD = "/bin/mail" MAIL_CONF_SCHEMA = Schema({ # the email address of user's account, it would also be place in the FROM header of the email Optional("email_addr"): Default(Use(str), default=""), # smtp server address to send the mail Optional("smtp_server"): Default(Use(str), default=""), # password for the account Optional("password"): Default(Use(str), default=""), AutoDel(str): object # for all other key we auto delete }) class MailManager(object): """contains all methods to manage NTP server in linux system""" def __init__(self): # need a mutex to protect create/delete bond interface self.lock = lock() self.conf_file = os.path.join(STORLEVER_CONF_DIR, MAIL_CONF_FILE_NAME)
# schema of the NTP server list carried in an update request
ntp_server_list_schema = Schema([{
    # it can be a ipv4 address, ipv6 address, or host dns name
    "server_addr": StrRe(r"^\S+$"),
    # if set to True, it would be forced to resolve the host name to
    # ipv6 address in DNS resolution
    Optional("ipv6"): Default(BoolVal(), default=False),
    # Marks the server as preferred. All other things being equal,
    # this host will be chosen for synchronization among set of correctly
    # operating hosts
    Optional("prefer"): Default(BoolVal(), default=False),
    # Specifies a mode number which is interpreted in a device
    # specific fashion. For instance, it selects a dialing,
    # protocol in the ACTS driver and a device subtype in the
    # parse drivers.
    # Only valid for reference clock server, i.e. server_addr is 127.127.t.n
    Optional("mode"): Default(IntVal(min=0, max=65535), default=0),
    # Specifies the stratum number assigned to the driver, an
    # integer between 0 and 15. This number overrides the
    # default stratum number ordinarily assigned by the driver
    # itself, usually zero.
    # Only valid for reference clock server, i.e. server_addr is 127.127.t.n
    Optional("stratum"): Default(IntVal(min=0, max=15), default=0),
    # These four flags are used for customizing the clock
    # driver. The interpretation of these values, and whether
    # they are used at all, is a function of the particular
    # clock driver. However, by convention flag4 is used to
    # enable recording monitoring data to the clockstats file
    # configured with the filegen command. Further information
    # on the filegen command can be found in Monitoring
    # Options.
    # Only valid for reference clock server, i.e. server_addr is 127.127.t.n
    Optional("flag1"): Default(IntVal(min=0, max=1), default=0),
    Optional("flag2"): Default(IntVal(min=0, max=1), default=0),
    Optional("flag3"): Default(IntVal(min=0, max=1), default=0),
    Optional("flag4"): Default(IntVal(min=0, max=1), default=0),
    DoNotCare(Use(str)): object  # for all other key we don't care
}])
SMB_CONF_SCHEMA = Schema({
    # workgroup controls what workgroup your server will appear to be in when
    # queried by clients. Note that this parameter also controls the Domain
    # name used with the security = domain setting,
    Optional("workgroup"): Default(Use(str), default="MYGROUP"),
    # This controls what string will show up in the printer comment box in
    # print manager and next to the IPC connection in net view. It can be any
    # string that you wish to show to your users.
    Optional("server_string"): Default(Use(str),
                                       default="Storlever Samba %v"),
    # This sets the NetBIOS name by which a Samba server is known. By default
    # it is empty, means the same as the first component of the host's DNS
    # name. If a machine is a browse server or logon server this name (or the
    # first component of the hosts DNS name) will be the name that these
    # services are advertised under
    Optional("netbios_name"): Default(Use(str), default=""),
    # This parameter is a comma, space, or tab delimited set of hosts which
    # are permitted to access a service. Default is empty, means all hosts
    # can access
    Optional("hosts_allow"): Default(Use(str), default=""),
    # This option affects how clients respond to Samba, which can be
    # share/user/server/domain/ads; default is user
    Optional("security"): Default(Use(str), default="user"),
    # This option allows the administrator to chose which backend will be
    # used for storing user and possibly group information. This allows you
    # to swap between different storage mechanisms without recompile.
    # default is tdbsam
    Optional("passdb_backend"): Default(Use(str), default="tdbsam"),
    # specifying the name of another SMB server or Active Directory domain
    # controller with this option, and using security = [ads|domain|server]
    # it is possible to get Samba to do all its username/password validation
    # using a specific remote server. Default is empty, means auto locate.
    Optional("password_server"): Default(Use(str), default=""),
    # This option specifies the kerberos realm to use. The realm is used as
    # the ADS equivalent of the NT4 domain. It is usually set to the DNS
    # name of the kerberos server.
    Optional("realm"): Default(Use(str), default=""),
    # This is a username which will be used for access to services which are
    # specified as guest ok (see below). Whatever privileges this user has
    # will be available to any client connecting to the guest service. This
    # user must exist in the password file, but does not require a valid
    # login.
    Optional("guest_account"): Default(Use(str), default="nobody"),
    # This controls whether the auto-home share is seen in the list of
    # available shares in a net view and in the browse list
    Optional("browseable"): Default(BoolVal(), default=False),
    # per-share configuration, keyed by share name
    Optional("share_list"): Default(Schema({DoNotCare(str):
                                            SHARE_CONF_SCHEMA}),
                                    default={}),
    AutoDel(str): object  # for all other key we auto delete
})
def test_dict(self):
    """Dict schema: required, converted, optional, defaulted, don't-care keys."""
    schema = Schema({
        "key1": str,                     # must already be a string
        "key2": Use(int),                # int, or int given as a string
        "key3": [IntVal(min=10, max=20)],
        Optional("key4"): str,           # may be absent
        Optional('key5'): Default(IntVal(min=100, max=200), default=100),
        DoNotCare(str): object           # any other key passes through
    })
    accepted = [
        ({"key1": "abc", "key2": '123', "key3": [10, 15, 20], "key5": 199},
         {"key1": "abc", "key2": 123, "key3": [10, 15, 20], "key5": 199}),
        ({"key1": "abc", "key2": '123', "key3": [10, 15, 20]},
         {"key1": "abc", "key2": 123, "key3": [10, 15, 20], "key5": 100}),
        ({"key1": "abc", "key2": '123', "key3": [10, 15, 20],
          "key4": 'abc'},
         {"key1": "abc", "key2": 123, "key3": [10, 15, 20], "key4": 'abc',
          "key5": 100}),
        ({"key1": "abc", "key2": '123', "key3": [10, 15, 20],
          "key4": 'abc', "key100": 'bbc', 'key200': [123, 23, 334]},
         {"key1": "abc", "key2": 123, "key3": [10, 15, 20], "key4": 'abc',
          "key5": 100, "key100": 'bbc', 'key200': [123, 23, 334]}),
    ]
    for given, expected in accepted:
        self.assertEqual(schema.validate(given), expected)
    rejected = [
        # key1 not a string
        {'key1': 123, "key2": '123', "key3": [10, 15, 20], "key4": 223},
        {'key1': 123, "key2": '123', "key3": [10, 15, 20], "key4": 'abc',
         "key100": 'bbc', 'key200': [123, 23, 334]},
        # key5 out of range
        {'key1': 'abc', "key2": '123', "key3": [10, 15, 20], "key4": 'abc',
         'key5': 0, "key100": 'bbc', 'key200': [123, 23, 334]},
    ]
    for bad in rejected:
        with self.assertRaises(SchemaError):
            schema.validate(bad)
    # NOTE(review): the following lines are the tail of a view function
    # that starts before this chunk.
    block_info = block_mgr.get_block_dev_by_name(block_name)
    block = {
        'name': block_info.name,
        'major': block_info.major,
        'minor': block_info.minor,
        'size': block_info.size,
        'type': block_info.type,
        'readonly': block_info.readonly,
        'fs_type': block_info.fs_type,
        'mount_point': block_info.mount_point
    }
    return block


# schema of the block maintenance request: choose the operation to run
block_clean_meta_schema = Schema({
    Optional("opt"): StrRe(r"^(clean_meta|flush_buf)$"),
    DoNotCare(Use(str)): object  # for all those key we don't care
})


# curl -v -X put -d opt=clean_meta 'http://192.168.1.123:6543/storlever/api/v1/block/block_list/sdb'
@put_view(route_name='block')
def block_clean_meta(request):
    """Clean the metadata or flush the buffer of the named block device."""
    block_name = request.matchdict['block']
    params = get_params_from_request(request, block_clean_meta_schema)
    if params['opt'] == "clean_meta":
        block_mgr = blockmgr.block_mgr()
        block_dev = block_mgr.get_block_dev_by_name(block_name)
        block_dev.clean_meta()
    elif params['opt'] == "flush_buf":
        block_mgr = blockmgr.block_mgr()
        block_dev = block_mgr.get_block_dev_by_name(block_name)
        block_dev.flush_block_buf()
    # NOTE(review): the view may continue beyond this chunk
        # NOTE(review): this dict literal is the tail of a function that
        # starts before this chunk.
        'system': sys_uname[0],
        'release': sys_uname[2],
        'version': sys_uname[3],
        'machine': sys_uname[4],
        'processor': sys_uname[5],
        'dist_name': dist_name,
        'dist_version': dist_version,
        'dist_id': dist_id,
        "uptime": str(uptime).split('.')[0],
        "loadavg": [av1, av2, av3]
    }
    return info


local_host_schema = Schema({
    Optional("hostname"): Use(str),  # name should be string
    DoNotCare(Use(str)): object  # for all those key we don't care
})


@put_view(route_name='system_localhost')
def put_system_localhost(request):
    """Set the local hostname from the request parameters."""
    sys_mgr = sysinfo.sys_mgr()  # get sys manager
    params = get_params_from_request(request, local_host_schema)
    if "hostname" in params:
        sys_mgr.set_hostname(params["hostname"], user=request.client_addr)
    return Response(status=200)


@get_view(route_name='cpu_list')
def system_cpu_list_get(request):
    sys_mgr = sysinfo.sys_mgr()  # get sys manager
    # NOTE(review): this function continues beyond this chunk
# schema of a persisted NTP server conf entry (unknown keys auto-deleted)
NTP_SERVER_CONF_SCHEMA = Schema({
    # it can be a ipv4 address, ipv6 address, or host dns name
    "server_addr": Use(str),
    # if set to True, it would be forced to resolve the host name to
    # ipv6 address in DNS resolution
    Optional("ipv6"): Default(BoolVal(), default=False),
    # Marks the server as preferred. All other things being equal,
    # this host will be chosen for synchronization among set of correctly
    # operating hosts
    Optional("prefer"): Default(BoolVal(), default=False),
    # Specifies a mode number which is interpreted in a device
    # specific fashion. For instance, it selects a dialing,
    # protocol in the ACTS driver and a device subtype in the
    # parse drivers.
    # Only valid for reference clock server, i.e. server_addr is 127.127.t.n
    Optional("mode"): Default(IntVal(min=0, max=65535), default=0),
    # Specifies the stratum number assigned to the driver, an
    # integer between 0 and 15. This number overrides the
    # default stratum number ordinarily assigned by the driver
    # itself, usually zero.
    # Only valid for reference clock server, i.e. server_addr is 127.127.t.n
    Optional("stratum"): Default(IntVal(min=0, max=15), default=0),
    # These four flags are used for customizing the clock
    # driver. The interpretation of these values, and whether
    # they are used at all, is a function of the particular
    # clock driver. However, by convention flag4 is used to
    # enable recording monitoring data to the clockstats file
    # configured with the filegen command. Further information
    # on the filegen command can be found in Monitoring
    # Options.
    # Only valid for reference clock server, i.e. server_addr is 127.127.t.n
    Optional("flag1"): Default(IntVal(min=0, max=1), default=0),
    Optional("flag2"): Default(IntVal(min=0, max=1), default=0),
    Optional("flag3"): Default(IntVal(min=0, max=1), default=0),
    Optional("flag4"): Default(IntVal(min=0, max=1), default=0),
    AutoDel(str): object  # for all other key we auto delete
})
config.add_route('quota_user', '/fs/list/{fsname}/quota_user/{user_name}') #put dete config.add_route('share_list', '/fs/fs_list/{fs}/share_list') #http://192.168.1.2:6543/storlever/api/v1/fs/type_list @get_view(route_name='type_list') def get_fs_type_list(request): fs_mrg = fsmgr.fs_mgr() type_list = fs_mrg.fs_type_list() return type_list mk_fs_schema = Schema({ "type": StrRe(r"^([a-zA-Z].+)$"), "dev": StrRe(r"^(/dev/.+)$"), Optional("options"): Default(StrRe(), default=""), DoNotCare(Use(str)): object # for all those key we don't care }) #curl -v -X POST -d type=ext4 -d dev=/dev/mapper/vg1-lv http://192.168.1.2:6543/storlever/api/v1/fs/mkfs @post_view(route_name='mkfs') def mk_fs(request): fs_mrg = fsmgr.fs_mgr() params = get_params_from_request(request, mk_fs_schema) fs_mrg.mkfs_on_dev(params["type"], params["dev"], params["options"]) return Response(status=200) #http://192.168.1.2:6543/storlever/api/v1/fs_list @get_view(route_name='fs_list')
import tarfile
import os

from storlever.lib.exception import StorLeverCmdError, StorLeverError
from storlever.lib.command import check_output
from storlever.lib.schema import Schema, Use, Optional, \
    Default, DoNotCare, BoolVal, IntVal, AutoDel

# schema of a single managed module's conf entry
MODULE_CONF_SCHEMA = Schema({
    "module_name": Use(str),
    Optional("rpms"): Default([Use(str)], default=[]),
    Optional("extra_files"): Default([Use(str)], default=[]),
    Optional("comment"): Default(Use(str), default=""),
    AutoDel(str): object  # for all other key we auto delete
})

RPM_CMD = "/bin/rpm"


class ModuleManager(object):
    """contains all methods to manage the storlever cfg"""

    def __init__(self):
        # registry of managed modules, keyed by module name
        self.managed_modules = {}
        self.module_schema = MODULE_CONF_SCHEMA
        # NOTE(review): class continues beyond this chunk
def test_dict(self):
    """Validate dict schemas with conversion, optional, default, extra keys."""
    schema = Schema({
        "key1": str,                     # key1 should be string
        "key2": Use(int),                # int, possibly given as a string
        "key3": [IntVal(min=10, max=20)],
        Optional("key4"): str,           # optional plain string
        Optional('key5'): Default(IntVal(min=100, max=200), default=100),
        DoNotCare(str): object           # anything else passes through
    })
    base = {"key1": "abc", "key2": '123', "key3": [10, 15, 20]}
    converted = {"key1": "abc", "key2": 123, "key3": [10, 15, 20]}
    # explicit key5 preserved
    self.assertEqual(schema.validate(dict(base, key5=199)),
                     dict(converted, key5=199))
    # key5 defaulted when absent
    self.assertEqual(schema.validate(dict(base)),
                     dict(converted, key5=100))
    # optional key4 accepted
    self.assertEqual(schema.validate(dict(base, key4='abc')),
                     dict(converted, key4='abc', key5=100))
    # don't-care keys survive
    self.assertEqual(
        schema.validate(dict(base, key4='abc', key100='bbc',
                             key200=[123, 23, 334])),
        dict(converted, key4='abc', key5=100, key100='bbc',
             key200=[123, 23, 334]))
    # key1 not a string
    with self.assertRaises(SchemaError):
        schema.validate({'key1': 123, "key2": '123', "key3": [10, 15, 20],
                         "key4": 223})
    with self.assertRaises(SchemaError):
        schema.validate({'key1': 123, "key2": '123', "key3": [10, 15, 20],
                         "key4": 'abc', "key100": 'bbc',
                         'key200': [123, 23, 334]})
    # key5 below the allowed minimum
    with self.assertRaises(SchemaError):
        schema.validate({'key1': 'abc', "key2": '123',
                         "key3": [10, 15, 20], "key4": 'abc', 'key5': 0,
                         "key100": 'bbc', 'key200': [123, 23, 334]})
    # NOTE(review): tail of a view function that starts before this chunk
    return port_info


# /network/eth_list/{port_name}/stat
@get_view(route_name='port_stat')
def get_port_stat(request):
    """Return traffic statistics of the named ethernet port."""
    port_name = request.matchdict['port_name']
    eth_face = ifmgr.if_mgr()
    netif_info = eth_face.get_interface_by_name(port_name)
    stat_info = netif_info.statistic_info
    return stat_info


# schema of the port modification request (empty string = leave unset)
port_mod_schema = Schema({
    Optional("ip"): StrRe(r"^(|\d+\.\d+\.\d+\.\d+)$"),       # ip addr
    Optional("netmask"): StrRe(r"^(|\d+\.\d+\.\d+\.\d+)$"),  # netmask addr
    Optional("gateway"): StrRe(r"^(|\d+\.\d+\.\d+\.\d+)$"),  # gateway addr
    DoNotCare(Use(str)): object  # for all those key we don't care
})


# curl -v -X PUT -d ip=192.168.0.222 -d gateway=192.168.1.1 -d netmask=255.255.0.0 http://192.168.1.123:6543/storlever/api/v1/network/eth_list/eth0
@put_view(route_name='single_port')
def modify_single_port(request):
    """Modify the ip/netmask/gateway config of a single ethernet port."""
    port_info = get_params_from_request(request, port_mod_schema)
    port_name = request.matchdict['port_name']
    eth_face = ifmgr.if_mgr()
    eth = eth_face.get_interface_by_name(port_name)
    eth.set_ip_config(ip=port_info.get("ip", None),
                      netmask=port_info.get("netmask", None),
                      gateway=port_info.get("gateway", None),
                      user=request.client_addr)
    # NOTE(review): the view may continue beyond this chunk
ZABBIX_AGENT_CONF_FILE_NAME = "zabbix_agentd_conf.yaml"
ZABBIX_AGENT_ETC_CONF_DIR = "/etc/zabbix/"
ZABBIX_AGENT_CONF_FILE = "zabbix_agentd.conf"

ZABBIX_AGENT_CONF_SCHEMA = Schema({
    Optional("hostname"): Default(Use(str), default=""),
    # How often list of active checks is refreshed, in seconds.
    # Note that after failing to refresh active checks the next refresh
    # will be attempted after 60 seconds.
    Optional("refresh_active_check"): Default(IntVal(min=60, max=3600),
                                              default=120),
    # the server ip:port list for active check. zabbix_agent would get the
    # active check list from each server at the refresh_active_check
    # frequency. Entry string Format is IP:PORT
    Optional("active_check_server_list"): Default([Use(str)], default=[]),
    # the server ip list for passive check. each passive check's source ip
    # must exist in this list. Entry string Format is IP
    Optional("passive_check_server_list"): Default([Use(str)], default=[]),
    AutoDel(str): object  # for all other key we auto delete
})


class ZabbixAgentManager(object):
    """contains all methods to manage NTP server in linux system"""
    # NOTE(review): docstring above looks copy-pasted from the NTP manager;
    # this class manages the zabbix agent — confirm and fix upstream.

    def __init__(self):
        # NOTE(review): __init__ body continues beyond this chunk
    # NOTE(review): the loop below is the tail of a view function that
    # starts before this chunk.
    vg_dict = []
    for vg in vgs:
        vg_info = {
            'name': vgs[vg].name,
            'uuid': vgs[vg].uuid,
            'size': vgs[vg].size,
            'free_size': vgs[vg].free_size,
        }
        vg_dict.append(vg_info)
    return vg_dict


# schema of the "create volume group" request parameters
new_vg_schema = Schema({
    "vgname": StrRe(r"^([a-zA-Z].+)$"),  # volume group name
    "dev": Default(ListVal(StrRe(r"^(/dev/sd[a-z]|/dev/md.+)$")),
                   default=[]),          # member device files
    DoNotCare(Use(str)): object  # for all those key we don't care
})


# curl -v -X POST -d vgname=vg1 -d dev=/dev/sdb,/dev/sdc http://192.168.1.123:6543/storlever/api/v1/block/lvm/vg_list
# enable eth* or disable eth*
@post_view(route_name='vg_list')
def create_vg(request):
    """Create a new LVM volume group from the given devices."""
    lvm_mng = lvm.lvm_mgr()
    params = get_params_from_request(request, new_vg_schema)
    vg = lvm_mng.new_vg(params['vgname'], params['dev'])
    if vg is None:
        return Response(status=500)
    # NOTE(review): the view continues beyond this chunk
class FileSystemManager(object):
    """contains all methods to manage the filesystems in linux system

    Keeps storlever's filesystem list in its own config file and mirrors
    it into a "# begin storlever" / "# end storlever" section of
    /etc/fstab.  All public methods serialize access through self.lock.
    """

    def __init__(self):
        # need a mutex to protect concurrent config read/modify/write
        self.lock = lock()
        # fs type name -> fs class; filled in via add_fs_type()
        self.support_fs_type = {}
        self.conf_file = os.path.join(STORLEVER_CONF_DIR, FS_CONF_FILE_NAME)
        # schema of one filesystem entry in the config file
        self.fs_conf_schema = Schema({
            "type": Use(str),         # filesystem type
            "dev_file": Use(str),     # dev file
            "dev_uuid": Use(str),     # dev uuid
            "mount_point": Use(str),  # mount point of this fs
            "mount_option": Use(str), # mount option of this fs
            "check_onboot": BoolVal(),  # fsck fs on boot
            Optional("comment"): Default(Use(str), default=""),  # comment
            AutoDel(str): object  # for all other key we auto delete
        })
        # the whole config file: fs name -> fs entry
        self.fs_dict_schema = Schema({
            DoNotCare(str): self.fs_conf_schema
        })
        # sync fs conf to fstab on boot
        self.sync_to_fstab()

    def _uuid_to_dev_file(self, uuid):
        """Resolve a filesystem UUID to its device file via blkid.

        Returns "" on any failure (best-effort lookup).
        """
        try:
            return check_output(["/sbin/blkid", "-U", uuid]).strip()
        except Exception:
            return ""

    def _dev_file_to_uuid(self, dev_file):
        """Read the UUID tag of the given device file via blkid.

        Raises StorLeverError 400 when the device has no UUID tag
        (blkid exits with 2), 500 on any other blkid failure.
        """
        try:
            return subprocess.check_output(
                ["/sbin/blkid", "-s", "UUID", "-o", "value", dev_file],
                stderr=subprocess.STDOUT, shell=False).strip()
        except subprocess.CalledProcessError as e:
            if e.returncode == 2:
                http_status = 400
                info = "The dev file (%s) has no UUID tag. " \
                       "Make sure it exists and contains a filesystem" % dev_file
            else:
                http_status = 500
                info = e.output
            # re-raise the storlever's error
            raise StorLeverError(info, http_status)

    def _load_conf(self):
        """Load the fs dict from the config file (empty dict if absent).

        Entries carrying a dev_uuid get their dev_file re-resolved from
        the uuid, so renamed devices are followed. Caller must hold
        self.lock.
        """
        fs_dict = {}
        cfg_mgr().check_conf_dir()
        if os.path.exists(self.conf_file):
            fs_dict = \
                Config.from_file(self.conf_file, self.fs_dict_schema).conf
        # check dev_file by uuid
        for fs_name, fs_conf in fs_dict.items():
            if fs_conf["dev_uuid"] != "":
                fs_conf["dev_file"] = self._uuid_to_dev_file(fs_conf["dev_uuid"])
        return fs_dict

    def _save_conf(self, fs_dict):
        """Write the fs dict back to the config file. Caller holds self.lock."""
        cfg_mgr().check_conf_dir()
        Config.to_file(self.conf_file, fs_dict)

    def _fs_conf_to_fstab_line(self, fs_name, fs_conf):
        """Render one fs entry as a single fstab(5) line (newline-terminated)."""
        # prefer the stable UUID= spec when a uuid is recorded
        if fs_conf["dev_uuid"] == "":
            dev_file_name = fs_conf["dev_file"]
        else:
            dev_file_name = "UUID=%s" % fs_conf["dev_uuid"]
        # fstab pass number: 2 = fsck on boot (non-root fs), 0 = no check
        if fs_conf["check_onboot"]:
            boot_flag = 2
        else:
            boot_flag = 0
        # get a fs object so type-specific classes can shape the options
        fs_object = self._get_fs_type_cls(fs_conf["type"])(fs_name, fs_conf)
        if fs_object.mount_options == "":
            option_flag = "defaults"
        else:
            option_flag = fs_object.mount_options
        return "%s\t%s\t%s\t%s\t0\t%d\n" % \
               (dev_file_name, fs_conf["mount_point"],
                fs_conf["type"], option_flag, boot_flag)

    def _sync_to_fstab(self, fs_dict):
        """Rewrite the storlever-managed section of /etc/fstab.

        Everything outside the "# begin storlever" / "# end storlever"
        markers is preserved verbatim; the section between them is
        regenerated from fs_dict. Caller must hold self.lock.
        """
        if os.path.exists(FSTAB_FILE_PATH):
            with open(FSTAB_FILE_PATH, "r") as f:
                lines = f.readlines()
        else:
            lines = []
        if "# begin storlever\n" in lines:
            before_storlever = lines[0:lines.index("# begin storlever\n")]
        else:
            before_storlever = lines[0:]
        # the file's last line may lack a trailing newline; add one so the
        # begin marker starts on its own line
        if before_storlever and (not before_storlever[-1].endswith("\n")):
            before_storlever[-1] += "\n"
        if "# end storlever\n" in lines:
            after_storlever = lines[lines.index("# end storlever\n") + 1:]
        else:
            after_storlever = []
        with open(FSTAB_FILE_PATH, "w") as f:
            f.writelines(before_storlever)
            f.write("# begin storlever\n")
            for fs_name, fs_conf in fs_dict.items():
                f.write(self._fs_conf_to_fstab_line(fs_name, fs_conf))
            f.write("# end storlever\n")
            f.writelines(after_storlever)

    def sync_to_fstab(self):
        """sync the fs conf list in storlever to /etc/fstab"""
        with self.lock:
            fs_dict = self._load_conf()
            self._sync_to_fstab(fs_dict)

    def system_restore_cb(self):
        """Restore callback: wipe the storlever section of /etc/fstab."""
        with self.lock:
            # an empty dict regenerates the managed section with no entries
            fs_dict = {}
            self._sync_to_fstab(fs_dict)

    def _mount_fs(self, name, fs_conf):
        # get a fs object of the configured type
        fs_object = self._get_fs_type_cls(fs_conf["type"])(name, fs_conf)
        # call this object's mount method
        fs_object.mount()

    def _umount_fs(self, name, fs_conf):
        # get a fs object of the configured type
        fs_object = self._get_fs_type_cls(fs_conf["type"])(name, fs_conf)
        # call this object's umount method
        fs_object.umount()

    def _get_fs_type_cls(self, type):
        """Return the fs class registered for this type (fs.FileSystem default)."""
        cls = self.support_fs_type.get(type, fs.FileSystem)
        return cls

    def add_fs_type(self, type, cls, *args, **kwargs):
        """add the fs class with specific type name"""
        with self.lock:
            self.support_fs_type[type] = cls

    def get_fs_by_name(self, fs_name):
        """return a fs object according to the given fs name

        Raises StorLeverError 404 when the name is unknown.
        """
        with self.lock:
            fs_dict = self._load_conf()
            if fs_name not in fs_dict:
                raise StorLeverError("Filesystem(%s) does not exist" % fs_name, 404)
            fs_conf = fs_dict[fs_name]
            cls = self._get_fs_type_cls(fs_conf["type"])
            return cls(fs_name, fs_conf)

    def get_fs_list(self):
        """get the fs object list in the storlever"""
        with self.lock:
            fs_dict = self._load_conf()
            fs_list = []
            for fs_name, fs_conf in fs_dict.items():
                cls = self._get_fs_type_cls(fs_conf["type"])
                fs_list.append(cls(fs_name, fs_conf))
            return fs_list

    def fs_type_list(self):
        """list all fs type supported in the storlever"""
        with self.lock:
            type_list = self.support_fs_type.keys()
            return type_list

    def add_fs(self, fs_name, type, dev_file, mount_option="",
               check_onboot=False, comment="", user="******"):
        """add a filesystem with the given properties to storlever

        The new filesystem would be mount on the specific
        directory(/mnt/FS_NAME) and would be added to the storlever's
        fs config.  Raises StorLeverError 400 on unknown type or
        duplicate name.
        """
        # check type
        if type not in self.support_fs_type:
            raise StorLeverError("type(%s) does not support" % type, 400)
        # check mount point
        mount_point = os.path.join(MOUNT_DIR, fs_name)
        if os.path.exists(mount_point):
            if not os.path.isdir(mount_point):
                raise StorLeverError("mount point(%s) already exists and is not directory" % mount_point)
        else:
            # create mount point
            os.makedirs(mount_point)
        # don't check dev file exist, because for the network fs,
        # the dev file is a network id
        # if not os.path.exists(dev_file):
        #     raise StorLeverError("dev file(%s) does not exist" % dev_file, 400)
        dev_uuid = ""
        # /dev/mapper devices are skipped -- presumably their UUIDs are not
        # stable/useful here; TODO confirm rationale
        if (not dev_file.startswith("/dev/mapper")) and os.path.exists(dev_file):
            dev_uuid = self._dev_file_to_uuid(dev_file)
        fs_conf = {
            "type": type,
            "dev_file": dev_file,
            "dev_uuid": dev_uuid,
            "mount_point": mount_point,
            "mount_option": mount_option,
            "check_onboot": check_onboot,
            "comment": comment
        }
        # normalize/validate the entry before persisting it
        fs_conf = self.fs_conf_schema.validate(fs_conf)
        with self.lock:
            fs_dict = self._load_conf()
            if fs_name in fs_dict:
                raise StorLeverError("filesystem(%s) already exist" % fs_name, 400)
            # mount fs first; only a successful mount gets persisted
            self._mount_fs(fs_name, fs_conf)
            fs_dict[fs_name] = fs_conf
            self._save_conf(fs_dict)
            self._sync_to_fstab(fs_dict)
        logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                   "New filesystem %s (dev:%s, mount_point:%s, option:%s) "
                   "is added by user(%s)" %
                   (fs_name, dev_file, mount_point, mount_option, user))

    def del_fs(self, fs_name, user="******"):
        """delete a filesystem from storlever

        the file would be deleted from the storlever's config file and
        would be unmount from linux system.  Raises StorLeverError 400
        when the name is unknown.
        """
        with self.lock:
            fs_dict = self._load_conf()
            if fs_name not in fs_dict:
                raise StorLeverError("filesystem(%s) does not exist" % fs_name, 400)
            fs_conf = fs_dict[fs_name]
            del fs_dict[fs_name]
            # umount fs first. if it failed, don't delete it in the config
            self._umount_fs(fs_name, fs_conf)
            self._save_conf(fs_dict)
            self._sync_to_fstab(fs_dict)
            # removing the (now empty) mount dir is best-effort only
            try:
                os.rmdir(fs_conf["mount_point"])
            except OSError as e:
                pass
            logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
                       "filesystem %s (dev:%s, mount_point:%s, option:%s) "
                       "is deleted by user(%s)" %
                       (fs_name, fs_conf['dev_file'], fs_conf['mount_point'],
                        fs_conf['mount_option'], user))

    def mkfs_on_dev(self, type, dev_file, fs_options=""):
        """Make a filesystem of the given type on the device file."""
        with self.lock:
            cls = self._get_fs_type_cls(type)
            cls.mkfs(type, dev_file, fs_options)