def main():
    """Migrate legacy per-service config files into the 'client' config store.

    On first run the values from the old flat files are cast and copied
    into a freshly created store; on later runs only the access mode of
    the existing store is (re-)validated.
    """
    source_files = ["logging-server", "meta-server", "package-client", "collserver"]
    if config_store.ConfigStore.exists("client"):
        # store already present, just validate / fix its access mode
        config_store.ConfigStore("client", access_mode=config_store.AccessModeEnum.LOCAL, fix_access_mode=True)
        return
    new_store = config_store.ConfigStore(
        "client",
        access_mode=config_store.AccessModeEnum.LOCAL,
        fix_access_mode=True)
    new_store.read("client_sample")
    migrated = {}
    for file_name in source_files:
        # build a short prefix from the initials, e.g. "meta-server" -> "MS"
        prefix = "".join(part[0].upper() for part in file_name.split("-"))
        for key, value in parse_file(file_name).items():
            # simple cast: digit strings to int, true/false to bool
            if value.isdigit():
                value = int(value)
            elif value.lower() in ["true"]:
                value = True
            elif value.lower() in ["false"]:
                value = False
            full_key = "{}_{}".format(prefix, key)
            migrated[full_key] = value
            if full_key in MAP_DICT:
                new_store[MAP_DICT[full_key]] = value
    new_store.write()
def read_config_store(self):
    # Load the process config store; if it cannot be parsed, remove it
    # and recreate it from scratch (best-effort recovery), then make
    # sure all default keys exist and register the monitoring flags.
    try:
        self.config_store = config_store.ConfigStore(
            CS_NAME, log_com=self.log,
            access_mode=config_store.AccessModeEnum.LOCAL)
    except config_store.ConfigStoreError:
        self.log(
            "configstore is not valid: {}".format(
                process_tools.get_except_info()),
            logging_tools.LOG_LEVEL_ERROR)
        # remove store
        config_store.ConfigStore.remove_store(CS_NAME)
        # create
        self.config_store = config_store.ConfigStore(
            CS_NAME, log_com=self.log,
            access_mode=config_store.AccessModeEnum.LOCAL)
    # fill in any missing keys with their defaults
    for _key, _default in DEFAULT_PROC_DICT.items():
        self.config_store[_key] = self.config_store.get(_key, _default)
    global_config.add_config_entries([
        ("MON_TARGET_STATE", configfile.BoolConfigVar(self.config_store["start_process"])),
        # just a guess
        ("MON_CURRENT_STATE", configfile.BoolConfigVar(False)),
    ])
    self.config_store.write()
    self.log(self._get_flag_info())
def read_config(self):
    """Read the machvector-feed config store.

    Creates a sample store with DEFAULTS on first run. On success
    ``self.config`` holds the parsed store, otherwise it is set to
    ``None`` (missing or unparseable store disables the feed).
    """
    sample_name = "{}_sample".format(CS_NAME)
    if not config_store.ConfigStore.exists(sample_name):
        self.log("Creating sample config store")
        sample_cs = config_store.ConfigStore(
            sample_name, log_com=self.log, read=False,
            access_mode=config_store.AccessModeEnum.LOCAL,
            fix_access_mode=True)
        # .iteritems() was py2-only; .items() works on both py2 and py3
        for _key, _value in DEFAULTS.items():
            sample_cs[_key] = _value
        sample_cs.write()
    if config_store.ConfigStore.exists(CS_NAME):
        try:
            self.config = config_store.ConfigStore(
                CS_NAME, log_com=self.log,
                access_mode=config_store.AccessModeEnum.LOCAL,
                fix_access_mode=True)
        except Exception:
            # narrowed from a bare except:; still best-effort (feed disabled)
            self.log(
                "disabled postgres machvector-feed because error parsing config store {}: {}".format(
                    CS_NAME,
                    process_tools.get_except_info(),
                ),
                logging_tools.LOG_LEVEL_ERROR)
            self.config = None
    else:
        self.log(
            "disabled postgres machvector-feed because no config-store {} found".format(CS_NAME, ),
            logging_tools.LOG_LEVEL_ERROR)
        self.config = None
def init(srv_process):
    """Initialise the class-level client registry.

    On first run the old XML client list (if present) is migrated into
    the config store; afterwards all stored clients are registered.
    """
    OLD_CONFIG_NAME = "/etc/sysconfig/cluster/package_server_clients.xml"
    Client.srv_process = srv_process
    Client.uuid_set = set()
    Client.name_set = set()
    Client.lut = {}
    if not config_store.ConfigStore.exists(CLIENT_CS_NAME):
        _create = True
        _cs = config_store.ConfigStore(CLIENT_CS_NAME, log_com=Client.srv_process.log, read=False, prefix="client")
        if os.path.exists(OLD_CONFIG_NAME):
            # migrate entries from the legacy XML config
            _xml = etree.fromstring(open(OLD_CONFIG_NAME, "r").read())
            for _idx, _entry in enumerate(_xml.findall(".//package_client")):
                _cs["{:d}".format(_idx)] = {
                    "name": _entry.attrib["name"],
                    "uuid": _entry.text,
                }
        Client.CS = _cs
    else:
        Client.CS = config_store.ConfigStore(CLIENT_CS_NAME, log_com=Client.srv_process.log, prefix="client")
        _create = False
    if _create:
        # persist the freshly created store once (it was previously
        # written twice: Client.CS and _cs are the same object)
        Client.CS.write()
    for client_num in list(Client.CS.keys()):
        _stuff = Client.CS[client_num]
        Client.register(_stuff["uuid"], _stuff["name"])
def _login(request, _user_object, login_credentials=None):
    """Finalize a successful authentication.

    Registers the session, stores the (alias) login name, bumps the
    user's login counter and returns the number of duplicate sessions
    (always 0 when multiple sessions per user are allowed).
    """
    login(request, _user_object)
    login_history.login_attempt(_user_object, request, True)
    # track this session and look for other live sessions of the user
    session_helper = SessionHelper()
    session_helper.add(session_helper.get_full_key(request.session))
    duplicate_keys = session_helper.check_for_multiple_session(request.session)
    if login_credentials is None:
        request.session["login_name"] = _user_object.login
    else:
        # alias login: login_name may differ from the real login
        real_user_name, login_password, login_name = login_credentials
        request.session["login_name"] = login_name
        request.session["password"] = base64.b64encode(login_password.encode("utf-8"))
    # set user in thread_local middleware, otherwise the background job handling would not work
    thread_local_middleware.user = _user_object
    _user_object.login_count += 1
    _user_object.save(update_fields=["login_count"])
    user.objects.ensure_default_variables(_user_object)
    user.objects.cleanup_before_login(_user_object)
    # log user
    _cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True)
    if _cs.get("session.multiple.per.user.allowed", False):
        # multiple sessions ok, report NO multiple sessions
        return 0
    return len(duplicate_keys)
def main(options):
    """Write the current version information into the database.

    Creates one ICSWVersion row per known version name using a new
    insert_idx, so older rows become identifiable as stale. With
    options["modify"] the ConfigStore itself is renewed first.
    """
    if not ICSWVersion.objects.all().count():
        insert_idx = 0
    else:
        insert_idx = max(ICSWVersion.objects.all().values_list("insert_idx", flat=True))
    insert_idx += 1
    _vers = config_store.ConfigStore(VERSION_CS_NAME, quiet=True)
    if options["modify"]:
        print("Renewing version info in ConfigStore")
        _vers["database"] = get_database_version()
        _vers["models"] = get_models_version()
        # BUGFIX: was a py2 print statement in an otherwise py3-style file
        print(_vers.file_name)
        _vers.write()
    print("Creating {:d} version entries with idx {:d} ...".format(
        len(VERSION_NAME_LIST), insert_idx))
    for _name in VERSION_NAME_LIST:
        _v = _vers[_name]
        print(" {} is {}".format(_name, _v))
        ICSWVersion.objects.create(
            name=_name,
            version=_v,
            insert_idx=insert_idx,
        )
    # stale entries
    stale = ICSWVersion.objects.filter(Q(insert_idx__lt=insert_idx)).count()
    print("Stale entries in database: {:d}".format(stale))
def register_cluster(opts):
    """Register this cluster with the remote registration service.

    Posts credentials, cluster id, fingerprint and store versions to
    REGISTRATION_URL and installs the returned license content.
    Exits the process with status 1 on connection errors.
    """
    cluster_id = device_variable.objects.get_cluster_id()
    _dict = {
        'username': opts.user,
        'password': opts.password,
        'cluster_name': opts.cluster_name,
        'cluster_id': cluster_id,
        "fingerprint": hfp_tools.get_server_fp(serialize=True),
    }
    _vers = config_store.ConfigStore(VERSION_CS_NAME, quiet=True)
    for _df in ["database", "software", "models"]:
        _dict["{}_version".format(_df)] = _vers[_df]
    data = urllib.urlencode(_dict)
    try:
        res = urllib2.urlopen(REGISTRATION_URL, data)
    except urllib2.URLError as e:
        print("Error while accessing registration: {}".format(e))
        # BUGFIX: print_exc() takes no exception argument (its first
        # parameter is a traceback limit); passing `e` was wrong
        traceback.print_exc()
        sys.exit(1)
    else:
        content = res.read()
        _install_license(content)
def process_init(self):
    # Set up logging, the inotify watcher and the 0MQ callback wiring
    # for the file-watch process.
    self.__log_template = logging_functions.get_logger(
        config_store.ConfigStore("client", quiet=True),
        "{}/{}".format(
            process_tools.get_machine_name(),
            self.global_config["LOG_NAME"],
        ),
        process_name=self.name,
    )
    self.__watcher = inotify_tools.InotifyWatcher()
    # was INOTIFY_IDLE_TIMEOUT in self.global_config, now static
    self.__idle_timeout = 5
    # self.__watcher.add_watcher("internal", "/etc/sysconfig/host-monitoring.d", inotify_tools.IN_CREATE | inotify_tools.IN_MODIFY, self._trigger)
    self.__file_watcher_dict = {}
    self.__target_dict = {}
    # self.register_func("connection", self._connection)
    # both (un)register_file_watch callbacks are routed to fw_handle
    self.send_pool_message("register_callback", "register_file_watch", "fw_handle")
    self.send_pool_message("register_callback", "unregister_file_watch", "fw_handle")
    self.register_exception("term_error", self._sigint)
    self.allow_signal(15)  # allow SIGTERM
    self.register_func("fw_handle", self._fw_handle)
    # register watcher fd with 0MQ poller
    self.register_poller(self.__watcher._fd, zmq.POLLIN, self._inotify_check)
    self.log("idle_timeout is {:d}".format(self.__idle_timeout))
    self.register_timer(self._fw_timeout, 1000)
def process_init(self):
    """Set up logging, the optional ICMP (ping) handler and the
    resolve/timeout machinery for this process."""
    self.__log_template = logging_functions.get_logger(
        config_store.ConfigStore("client", quiet=True),
        "{}/{}".format(
            process_tools.get_machine_name(),
            self.global_config["LOG_NAME"],
        ),
        process_name=self.name,
    )
    self.register_func("connection", self._connection)
    # clear flag for extra twisted thread
    self.__extra_twisted_threads = 0
    # print self.start_kwargs
    if self.start_kwargs.get("icmp", True):
        self.icmp_protocol = HMIcmpProtocol(
            self,
            self.__log_template,
            debug=self.global_config["DEBUG"]
        )
        # reactor.listenWith(icmp_twisted.icmp_port, self.icmp_protocol)
        # reactor.listen_ICMP(self.icmp_protocol)
        self.register_func("ping", self._ping)
    else:
        self.icmp_protocol = None
    self.register_func("resolved", self._resolved)
    self.register_timer(self._check_timeout, 5)
    # FIX: raw string; "\d" in a normal string is an invalid escape
    # sequence (DeprecationWarning / W605 on py3)
    self.__ip_re = re.compile(r"^\d+\.\d+\.\d+\.\d+$")
    self.__pending_id, self.__pending_dict = (0, {})
def _login(request, _user_object, login_credentials=None):
    """Finalize a successful authentication.

    Registers the session, stores the (alias) login name, bumps the
    login counter, validates the stored UI theme and returns the number
    of duplicate sessions (0 when multiple sessions are allowed).
    """
    login(request, _user_object)
    login_history.login_attempt(_user_object, request, True)
    # session names
    my_sh = SessionHelper()
    my_sh.add(my_sh.get_full_key(request.session))
    # check for multiple session
    _dup_keys = my_sh.check_for_multiple_session(request.session)
    # for alias logins login_name != login
    if login_credentials is not None:
        real_user_name, login_password, login_name = login_credentials
        request.session["login_name"] = login_name
        # BUGFIX: b64encode() requires bytes, so the password must be
        # *encoded* (".decode" was wrong; the sibling _login uses encode)
        request.session["password"] = base64.b64encode(
            login_password.encode("utf-8"))
    else:
        request.session["login_name"] = _user_object.login
    _user_object.login_count += 1
    _user_object.save(update_fields=["login_count"])
    # fall back to the first configured theme if the stored one vanished
    _theme_shorts = [_short for _short, _long in settings.THEMES]
    if _user_object.ui_theme_selection not in _theme_shorts:
        _user_object.ui_theme_selection = _theme_shorts[0]
        _user_object.save()
    # log user
    _cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True)
    _mult_ok = _cs.get("session.multiple.per.user.allowed", False)
    if _mult_ok:
        # multiple sessions ok, report NO multiple sessions
        return 0
    else:
        return len(_dup_keys)
def get_uuid(renew=False):
    # Return the cluster device UUID as a uuid.UUID instance.
    # Creates (or with renew=True regenerates) the value in the global
    # config store, migrating from the legacy flat file if present, and
    # mirrors the urn to NEW_UUID_NAME on disk (best effort).
    OLD_UUID_NAME = "/etc/sysconfig/cluster/.cluster_device_uuid"
    if not config_store.ConfigStore.exists(DATASTORE_NAME):
        if os.path.isfile(OLD_UUID_NAME):
            # migrate from the legacy flat file
            uuid_content = open(OLD_UUID_NAME, "r").read().strip()
            try:
                the_uuid = uuid.UUID(uuid_content)
            except ValueError:
                # uuid is not readable, create new
                the_uuid = uuid.uuid4()
            try:
                os.unlink(OLD_UUID_NAME)
            except (IOError, OSError):
                # removal of the old file is best effort
                pass
        else:
            the_uuid = uuid.uuid4()
        _create_cs = True
    elif renew:
        the_uuid = uuid.uuid4()
        _create_cs = True
    else:
        _create_cs = False
    if _create_cs:
        _cs = config_store.ConfigStore(
            DATASTORE_NAME,
            access_mode=config_store.AccessModeEnum.GLOBAL)
        _cs["cluster.device.uuid"] = the_uuid.urn
        _cs.write()
    # always re-read from the store so the returned value matches disk
    the_uuid = uuid.UUID(
        config_store.ConfigStore(
            DATASTORE_NAME,
            quiet=True,
            access_mode=config_store.AccessModeEnum.GLOBAL,
            fix_access_mode=True,
        )["cluster.device.uuid"])
    # mirror the urn to NEW_UUID_NAME if missing or outdated
    _write = False
    if not os.path.exists(NEW_UUID_NAME):
        _write = True
    else:
        old_uuid = open(NEW_UUID_NAME, "r").read().strip()
        if old_uuid != the_uuid.urn:
            _write = True
    if _write:
        try:
            open(NEW_UUID_NAME, "w").write("{}\n".format(the_uuid.urn))
        except IOError:
            # target may not be writable, ignore
            pass
    return the_uuid
def _add_lw_parser(self, sub_parser):
    # Attach the "logwatch" sub-command parser and return it.
    # Defaults for the log root and machine name come from the client
    # config store / the local hostname.
    client_cs = config_store.ConfigStore("client", quiet=True)
    _mach_name = process_tools.get_machine_name(short=True)
    parser = sub_parser.add_parser("logwatch", help="watch icsw logs")
    parser.set_defaults(subcom="status", execute=self._execute)
    # get logroot from config_store
    parser.add_argument("--root", type=str, default=client_cs["log.logdir"], help="root directory [%(default)s]")
    parser.add_argument("--machine", type=str, default=_mach_name, help="machine to use [%(default)s]")
    parser.add_argument("-n", type=int, default=400, help="show latest [%(default)d] lines")
    parser.add_argument("--format", type=str, default="%a %b %d %H:%M:%S %Y", help="argument for parsing loglines [%(default)s]")
    # -f / -F both target "follow" (default True); -f keeps it on,
    # -F switches it off
    parser.add_argument(
        "-f",
        dest="follow",
        default=True,
        action="store_true",
        help="enable follow mode, always enabled [%(default)s]")
    parser.add_argument("-F", dest="follow", default=True, action="store_false", help="disable follow mode")
    parser.add_argument("--system-filter", type=str, default=".*", help="regexp filter for system [%(default)s]")
    parser.add_argument("--with-nodes", default=False, action="store_true", help="add node logs [%(default)s]")
    parser.add_argument("--node-filter", type=str, default=".*", help="regexp filter for nodes [%(default)s]")
    parser.add_argument("--verbose", default=False, action="store_true", help="enable verbose mode [%(default)s]")
    parser.add_argument("--show-unparseable", default=False, action="store_true", help="show unparseable lines [%(default)s]")
    parser.add_argument(
        "filter",
        nargs="*",
        type=str,
        help="list of regexp filter for system [%(default)s]")
    return parser
def process_init(self):
    # Create the process logger (0MQ based) and open the client
    # config store.
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        zmq=True,
        context=self.zmq_context,
        init_logger=True)
    self.CS = config_store.ConfigStore("client", self.log)
def process_init(self):
    # Set up logging, the client config store and the package-command
    # queues / timers for this process.
    self.__log_template = logging_functions.get_logger(
        config_store.ConfigStore("client", quiet=True),
        "{}/{}".format(
            process_tools.get_machine_name(),
            self.global_config["LOG_NAME"],
        ),
        process_name=self.name,
    )
    self.CS = config_store.ConfigStore("client", self.log)
    self.commands = []
    self.register_func("command_batch", self._command_batch)
    # commands pending because of missing package list
    self.pending_commands = []
    # list of pending package commands
    self.package_commands = []
    self.register_timer(self._check_commands, 10)
def post(self, request):
    """Persist the selected default database index in the global store."""
    selected_idx = request.POST["database_idx"]
    store = config_store.ConfigStore(
        GEN_CS_NAME,
        quiet=True,
        access_mode=config_store.AccessModeEnum.GLOBAL)
    store["default.database.idx"] = selected_idx
    store.write()
def __call__(self, srv_com, cur_ns):
    # Answer a zmq-id request: set the reply's zmq_id to the stored
    # 0MQ identity of the default ("0") bind entry.
    _cs = config_store.ConfigStore(ZMQ_ID_MAP_STORE, log_com=self.log, prefix="bind")
    if "target_ip" in srv_com:
        target_ip = srv_com["target_ip"].text
    else:
        target_ip = "0"
    # NOTE(review): target_ip is computed but never used below; the
    # reply always carries the uuid of entry "0" — confirm intent
    srv_com["zmq_id"] = _cs["0"]["uuid"]
def main():
    """Create / migrate the general config store.

    Legacy flat settings and on-disk flag files are folded into store
    keys on first run; missing default keys are filled in afterwards.
    """
    if not config_store.ConfigStore.exists(GEN_CS_NAME):
        # migrate
        new_store = config_store.ConfigStore(
            GEN_CS_NAME, access_mode=config_store.AccessModeEnum.GLOBAL)
        for _key, _value in get_old_local_settings().items():
            new_store[_key] = _value
        new_store.write()
    new_store = config_store.ConfigStore(GEN_CS_NAME)
    # legacy on-disk flag files become boolean store keys; the flag
    # file is removed once its value has been recorded
    for flag_key, flag_path in [
        ("db.auto.update", AUTO_FLAG),
        ("mode.is.satellite", SATELLITE_FLAG),
        ("mode.is.slave", SLAVE_FLAG),
    ]:
        if flag_key not in new_store:
            if os.path.exists(flag_path):
                new_store[flag_key] = True
                remove_file(flag_path)
            else:
                new_store[flag_key] = False
    # fill in missing defaults
    for _name, _default in [
        ("create.default.network", True),
        ("create.network.device.types", True),
        ("session.multiple.per.user.allowed", False),
        ("missing.timezone.is.critical", True),
        ("overall.style", "condensed"),
        ("multiple.databases", False),
        ("default.database.idx", 0),
    ]:
        if _name not in new_store:
            new_store[_name] = _default
    new_store.write()
    remove_file(LS_OLD_FILE)
    migrate_uuid()
    migrate_db_cf()
def migrate_db_cf():
    # Migrate the legacy key=value DB access file into a config store,
    # preserving the original file's owner, group and mode; on later
    # runs only the access rights of the existing store are fixed.
    if not config_store.ConfigStore.exists(
            DB_ACCESS_CS_NAME) and os.path.exists(DB_FILE):
        _src_stat = os.stat(DB_FILE)
        _cs = config_store.ConfigStore(
            DB_ACCESS_CS_NAME,
            access_mode=config_store.AccessModeEnum.LOCAL)
        # parse lines of the form PREFIX_KEY=value, skipping NAGIOS
        # entries; the part after the first "_" becomes the key
        sql_dict = {
            key.split("_")[1]: value
            for key, value in [
                line.strip().split("=", 1)
                for line in open(DB_FILE, "r").read().split("\n")
                if line.count("=") and line.count("_") and not line.count("NAGIOS")
            ]
        }
        for src_key in [
            "DATABASE",
            "USER",
            "PASSWD",
            "HOST",
            "ENGINE",
            "PORT",
        ]:
            if src_key in sql_dict:
                _val = sql_dict[src_key]
                # numeric values (e.g. PORT) are stored as int
                if _val.isdigit():
                    _val = int(_val)
                _cs["db.{}".format(src_key.lower())] = _val
        _cs.set_type("db.passwd", "password")
        _cs.write()
        # copy modes
        os.chown(_cs.file_name, _src_stat[stat.ST_UID], _src_stat[stat.ST_GID])
        os.chmod(_cs.file_name, _src_stat[stat.ST_MODE])
        # delete old file
        remove_file(DB_FILE)
    else:
        # check rights
        config_store.ConfigStore(
            DB_ACCESS_CS_NAME,
            access_mode=config_store.AccessModeEnum.LOCAL,
            fix_access_mode=True,
            fix_prefix_on_read=False)
def process_init(self): self.__log_template = logging_functions.get_logger( config_store.ConfigStore("client", quiet=True), "{}/{}".format( process_tools.get_machine_name(), self.global_config["LOG_NAME"], ), process_name=self.name, ) # log.startLoggingWithObserver(my_observer, setStdout=False) self.__debug = self.global_config["DEBUG"] self.register_func("resolve", self._resolve, greedy=True) # clear flag for extra twisted thread self.__cache = {}
def register_cluster(opts):
    """Register this cluster with the remote registration service.

    Posts credentials, cluster id, fingerprint and store versions to
    REGISTRATION_URL, relays any server-side messages and installs the
    returned license file on success. Exits non-zero on errors.
    """
    cluster_id = device_variable.objects.get_cluster_id()
    _dict = {
        'username': opts.user,
        'password': opts.password,
        'cluster_name': opts.cluster_name,
        'cluster_id': cluster_id,
        "fingerprint": hfp_tools.get_server_fp(serialize=True),
    }
    _vers = config_store.ConfigStore(VERSION_CS_NAME, quiet=True)
    for _df in ["database", "software", "models"]:
        _dict["{}_version".format(_df)] = _vers[_df]
    data = urllib.parse.urlencode(_dict)
    try:
        res = urllib.request.urlopen(REGISTRATION_URL, data.encode("utf-8"))
    except urllib.error.URLError as e:
        print("Error while accessing registration: {}".format(e))
        # BUGFIX: print_exc() takes no exception argument (its first
        # parameter is a traceback limit); passing `e` was wrong
        traceback.print_exc()
        sys.exit(1)
    else:
        content = res.read()
        try:
            content_xml = etree.fromstring(content)
        except Exception:
            # narrowed from a bare except:
            print("Error interpreting response: {}".format(
                process_tools.get_except_info()))
            sys.exit(-1)
        else:
            # relay server-side messages with a severity prefix
            for message_xml in content_xml.xpath("//messages/message"):
                prefix = {
                    20: "",
                    30: "Warning: ",
                    40: "Error: "
                }.get(int(message_xml.get('log_level')), "")
                print("{}{}".format(prefix, message_xml.text))
            code = int(content_xml.find("header").get("code"))
            if code < 40:
                # no error
                lic_file_node = content_xml.xpath(
                    "//values/value[@name='license_file']")
                if len(lic_file_node):
                    _install_license(lic_file_node[0].text)
                else:
                    print("No license file found in response.")
            else:
                print("Exiting due to errors.")
                sys.exit(1)
def read_config_store(self):
    """Load the process config store, fill in missing defaults and
    register the monitoring flags with the global config."""
    self.config_store = config_store.ConfigStore(
        CS_NAME, log_com=self.log,
        access_mode=config_store.AccessModeEnum.LOCAL)
    # .iteritems() was py2-only; .items() works on py2 and py3 and
    # matches the sibling read_config_store implementation
    for _key, _default in DEFAULT_PROC_DICT.items():
        self.config_store[_key] = self.config_store.get(_key, _default)
    global_config.add_config_entries([
        ("MON_TARGET_STATE", configfile.bool_c_var(self.config_store["start_process"])),
        # just a guess
        ("MON_CURRENT_STATE", configfile.bool_c_var(False)),
    ])
    self.config_store.write()
    self.log(self._get_flag_info())
def do_uuid(conf):
    # Write the device UUID to the config store and to the legacy
    # locations expected by host-monitoring and old tooling.
    conf_dict = conf.conf_dict
    uuid_str = "urn:uuid:{}".format(conf_dict["device"].uuid)
    _cs = config_store.ConfigStore(uuid_tools.DATASTORE_NAME)
    _cs["cluster.device.uuid"] = uuid_str
    cdf_file = conf.add_file_object(_cs.file_name)
    cdf_file.append(_cs.show())
    # 0MQ identity for host-monitoring, bound on all addresses
    hm_uuid = conf.add_file_object("/etc/sysconfig/host-monitoring.d/0mq_id")
    hm_uuid.append(
        etree.tostring(
            E.bind_info(E.zmq_id(uuid_str, bind_address="*")),
            pretty_print=True,
            xml_declaration=True,
        ))
    # legacy flat-file location
    old_uuid = conf.add_file_object("/opt/cluster/etc/.cluster_device_uuid")
    old_uuid.append(uuid_str)
def post(self, request):
    """Return login-page metadata: hints, next URL, theme, password
    policy and the list of configured databases."""
    # django version
    cache_key = "_NEXT_URL_{}".format(request.META["REMOTE_ADDR"])
    next_url = cache.get(cache_key)
    cache.delete(cache_key)
    gen_cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True)
    request.xml_response["login_hints"] = json.dumps(_get_login_hints())
    request.xml_response["next_url"] = next_url or ""
    request.xml_response["theme_default"] = settings.THEME_DEFAULT
    # request.xml_response["menu_default"] = "normal"
    request.xml_response["password_character_count"] = "{:d}".format(
        gen_cs["password.character.count"])
    # database dicts use "_" instead of "." in their keys
    database_list = []
    for db_entry in list(settings.ICSW_DATABASE_DICT.values()):
        database_list.append({
            db_key.replace(".", "_"): db_value
            for db_key, db_value in db_entry.items()
        })
    request.xml_response["icsw_databases"] = json.dumps(database_list)
    request.xml_response["active_database_idx"] = settings.ICSW_ACTIVE_DATABASE_IDX
def post(self, request):
    """Return login-page metadata including the django version string."""
    # django version: collect the leading integer components of
    # django.VERSION (e.g. (1, 11, 4, "final", 0) -> "1.11.4")
    _vers = []
    for _v in django.VERSION:
        # isinstance instead of type(...) == int (idiomatic type check)
        if isinstance(_v, int):
            _vers.append("{:d}".format(_v))
        else:
            break
    _ckey = "_NEXT_URL_{}".format(request.META["REMOTE_ADDR"])
    _next_url = cache.get(_ckey)
    cache.delete(_ckey)
    _cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True)
    request.xml_response["login_hints"] = json.dumps(_get_login_hints())
    request.xml_response["django_version"] = ".".join(_vers)
    request.xml_response["next_url"] = _next_url or ""
    request.xml_response["theme_default"] = settings.THEME_DEFAULT
    request.xml_response["password_character_count"] = "{:d}".format(
        _cs["password.character.count"])
def __init__(self, mv, log_com):
    """Set up pgpool monitoring.

    Locates the pcp_proc_info binary and opens the pgpool config store;
    monitoring stays disabled (self.enabled is False) when either is
    missing.
    """
    self.__log_com = log_com
    self.mv = mv
    self.mv_keys = set()
    self._pcp_proc_info = process_tools.find_file("pcp_proc_info")
    self.enabled = False
    if self._pcp_proc_info:
        self.log("found pcp_proc_info at {}".format(self._pcp_proc_info))
        if config_store.ConfigStore.exists(CSTORE_NAME):
            self.enabled = True
            self._pgpool_config = config_store.ConfigStore(
                CSTORE_NAME, log_com=self.log)
        else:
            self.log("no config_store named {} found".format(CSTORE_NAME),
                     logging_tools.LOG_LEVEL_WARN)
    else:
        # fixed typo in the log message ("foudn" -> "found")
        self.log("found no pcp_proc_info, disabled monitoring",
                 logging_tools.LOG_LEVEL_WARN)
def load_store(self):
    """Read the affinity config store and build the matching regexp.

    Affinity feeding is disabled (feed_affinity False, af_struct None)
    when the store is missing or yields no patterns.
    """
    self.affinity_set = set()
    if config_store.ConfigStore.exists(AFFINITY_CSTORE):
        _afc = config_store.ConfigStore(AFFINITY_CSTORE, log_com=self.log)
        for _key in list(_afc.keys()):
            self.affinity_set.add(_afc[_key])
    if self.affinity_set:
        self.feed_affinity = True
        self.log("affinity_set ({:d}): {}".format(
            len(self.affinity_set), ",".join(self.affinity_set)))
        # one alternation group per configured pattern; .format()
        # replaces the dated "%s" interpolation (same resulting pattern)
        affinity_re = re.compile("|".join(
            ["({})".format(line) for line in self.affinity_set]))
        self.af_struct = AffinityStruct(self, self.log, affinity_re)
    else:
        self.log("affinity-cstore {} missing".format(AFFINITY_CSTORE),
                 logging_tools.LOG_LEVEL_ERROR)
        self.feed_affinity = False
        self.af_struct = None
def get_cluster_id():
    # Best-effort lookup of the CLUSTER_ID device variable.
    # Returns None when django or the database is unavailable, or when
    # the node runs in satellite mode (no local cluster database).
    import os
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "initat.cluster.settings")
    cluster_id = None
    try:
        from django.conf import settings
    except:
        # django not installed / not configured
        pass
    else:
        from django.db import connection
        _cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True)
        try:
            _sm = _cs["mode.is.satellite"]
        except:
            # key missing -> not a satellite
            _sm = False
        if _sm:
            # satellite nodes have no local cluster database
            pass
        else:
            import django
            try:
                django.setup()
            except:
                pass
            else:
                from django.db.models import Q
                from initat.cluster.backbone.models import device_variable
                try:
                    _vars = device_variable.objects.all().count()
                except:
                    # database not initialised
                    pass
                else:
                    _vars = device_variable.objects.values_list(
                        "val_str", flat=True
                    ).filter(
                        Q(name="CLUSTER_ID") & Q(device__device_group__cluster_device_group=True))
                    if len(_vars):
                        cluster_id = _vars[0]
    return cluster_id
def show_cluster_id(opts):
    """Print the cluster id, the store versions and (unless suppressed
    via --without-fp) the server fingerprint."""
    if opts.raw:
        print(device_variable.objects.get_cluster_id())
        return
    print("")
    print("ClusterID: {}".format(device_variable.objects.get_cluster_id()))
    version_store = config_store.ConfigStore(VERSION_CS_NAME, quiet=True)
    for component in ["database", "software", "models"]:
        print("{} version: {}".format(component.title(), version_store[component]))
    print("")
    if opts.without_fp:
        return
    is_valid, fp_log = hfp_tools.server_dict_is_valid(hfp_tools.get_server_fp())
    # the validation log is printed in either case; the fingerprint
    # itself only when the server dict is valid
    print(fp_log)
    if is_valid:
        print("")
        print("Current Server Fingerprint:")
        print("")
        print(hfp_tools.get_server_fp(serialize=True))
def get_safe_cluster_var(var_name, default=None):
    # Best-effort lookup of a cluster-wide variable ("name" or "id").
    # Returns `default` whenever django or the database is unavailable
    # or the node runs in satellite mode.
    import os
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "initat.cluster.settings")
    var_value = default
    try:
        from django.conf import settings
    except:
        # django not installed / not configured
        pass
    else:
        from django.db import connection
        _cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True)
        try:
            _sm = _cs["mode.is.satellite"]
        except:
            # key missing -> not a satellite
            _sm = False
        if _sm:
            # satellite nodes have no local cluster database
            pass
        else:
            import django
            try:
                django.setup()
            except:
                pass
            else:
                try:
                    from initat.cluster.backbone.models import device_variable
                    if var_name == "name":
                        var_value = device_variable.objects.get_cluster_name(default)
                    elif var_name == "id":
                        var_value = device_variable.objects.get_cluster_id(default)
                    else:
                        var_value = "unknown attribute '{}'".format(var_name)
                except:
                    # may be improperly configured or something similar
                    var_value = default
    return var_value
ALLOWED_HOSTS = ["*"]
INTERNAL_IPS = ("127.0.0.1", )
MANAGERS = ADMINS
MAIL_SERVER = "localhost"
DATABASE_ROUTERS = ["initat.cluster.backbone.routers.icswDBRouter"]
# config stores
# database config
_cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True, access_mode=config_store.AccessModeEnum.GLOBAL)
# version config
# TODO: check for local config when running in debug (development) mode
_vers = config_store.ConfigStore(VERSION_CS_NAME, quiet=True)
_DEF_NAMES = ["database", "software", "models"]
# expose the recorded store versions as settings, "???" when unset
ICSW_VERSION_DICT = {_name: _vers.get(_name, "???") for _name in _DEF_NAMES}
ICSW_DATABASE_VERSION = ICSW_VERSION_DICT["database"]
ICSW_SOFTWARE_VERSION = ICSW_VERSION_DICT["software"]
ICSW_MODELS_VERSION = ICSW_VERSION_DICT["models"]
# NOTE(review): debug mode is keyed to hard-coded developer machine
# names — confirm this is intentional
ICSW_DEBUG = process_tools.get_machine_name() in ["eddie", "lemmy"]
# validate settings