def main():
    """rrd-grapher entry point: register the RRD/graphing config defaults, run the server code, exit."""
    # default configuration entries; database=True entries are persisted to the DB
    config_entries = [
        ("SERVER_PATH", configfile.BoolConfigVar(False, help_string="set server_path to store RRDs [%(default)s]")),
        ("RRD_DIR", configfile.StringConfigVar("/var/cache/rrd", help_string="directory of rrd-files on local disc", database=True)),
        ("RRD_CACHED_DIR", configfile.StringConfigVar("/var/run/rrdcached", database=True)),
        ("RRD_CACHED_SOCKET", configfile.StringConfigVar("/var/run/rrdcached/rrdcached.sock", database=True)),
        ("GRAPHCONFIG_BASE", configfile.StringConfigVar("/opt/cluster/share/rrd_grapher/", help_string="name of colortable file", database=True)),
        ("COMPOUND_DIR", configfile.StringConfigVar("/opt/cluster/share/rrd_grapher/", help_string="include dir for compound XMLs", database=True)),
    ]
    global_config.add_config_entries(config_entries)
    run_code()
    sys.exit(0)
def main():
    """collectd entry point: register defaults, kill stale instances, enter the server loop."""
    # default configuration entries; database=True entries are persisted to the DB
    config_entries = [
        ("RRD_DIR", configfile.StringConfigVar("/var/cache/rrd", help_string="directory of rrd-files on local disc", database=True)),
        ("RRD_CACHED_DIR", configfile.StringConfigVar("/var/run/rrdcached", database=True)),
        ("RRD_CACHED_SOCKET", configfile.StringConfigVar("/var/run/rrdcached/rrdcached.sock", database=True)),
        ("RRD_STEP", configfile.IntegerConfigVar(60, help_string="RRD step value", database=True)),
        ("AGGREGATE_DIR", configfile.StringConfigVar("/opt/cluster/share/collectd", help_string="base dir for collectd aggregates")),
    ]
    global_config.add_config_entries(config_entries)
    kill_previous()
    # late load after population of global_config
    from .server import server_process
    server_process().loop()
    os._exit(0)
def from_database(self, sc_result, init_list=None):
    """Populate this configuration from device-specific config variables stored in the DB.

    First registers the defaults from ``init_list`` (as database-backed entries),
    then — if ``sc_result`` has an effective device — overlays values found in the
    ``config_str`` / ``config_int`` / ``config_blob`` / ``config_bool`` tables for
    that device's config.  Existing entries keep their ``database`` and
    ``help_string`` settings.

    :param sc_result: server-config lookup result; ``.config`` and
        ``.effective_device`` select the DB rows to read
    :param init_list: optional list of ``(var_name, config_var)`` tuples used as
        defaults and as a name filter for the DB query
    """
    from django.db.models import Q
    from initat.tools import configfile
    from initat.cluster.backbone.models import config_blob, \
        config_bool, config_int, config_str
    # NOTE: was a mutable default argument (init_list=[]); normalized here
    init_list = init_list or []
    _VAR_LUT = {
        "int": config_int,
        "str": config_str,
        "blob": config_blob,
        "bool": config_bool,
    }
    self.add_config_entries(init_list, database=True)
    if sc_result.effective_device:
        # dict of local vars without specified host
        for short in ["str", "int", "blob", "bool"]:
            # very similiar code appears in config_tools.py
            src_sql_obj = _VAR_LUT[short].objects
            if init_list:
                # restrict the query to the names we were initialized with
                src_sql_obj = src_sql_obj.filter(
                    Q(name__in=[var_name for var_name, _var_value in init_list])
                )
            for db_rec in src_sql_obj.filter(
                Q(config=sc_result.config) &
                Q(config__device_config__device=sc_result.effective_device)
            ).order_by("name"):
                var_name = db_rec.name
                source = "{}_table (pk={})".format(short, db_rec.pk)
                if isinstance(db_rec.value, array.array):
                    # blob values arrive as array.array; tobytes() replaces the
                    # tostring() alias that was removed in Python 3.9
                    new_val = configfile.StringConfigVar(db_rec.value.tobytes(), source=source)
                elif short == "int":
                    new_val = configfile.IntegerConfigVar(int(db_rec.value), source=source)
                elif short == "bool":
                    new_val = configfile.BoolConfigVar(bool(db_rec.value), source=source)
                else:
                    new_val = configfile.StringConfigVar(db_rec.value, source=source)
                _present_in_config = var_name in self
                if _present_in_config:
                    # copy settings from config
                    new_val.database = self.database(var_name)
                    new_val.help_string = self.help_string(var_name)
                # config variable names are stored upper-case
                self.add_config_entries([(var_name.upper(), new_val)])
def __init__(self):
    """Set up the rrd-grapher server process pool.

    Order matters: config is read from the DB before daemonizing closes the
    connection, worker processes are started, then network sockets are bound.
    """
    threading_tools.icswProcessPool.__init__(self, "main")
    self.CC.init(icswServiceEnum.grapher_server, global_config)
    self.CC.check_config()
    # close connection (daemonizing)
    db_tools.close_connection()
    self.CC.read_config_from_db(
        [
            (
                "GRAPH_ROOT_DEBUG",
                configfile.StringConfigVar(
                    os.path.abspath(
                        os.path.join(
                            settings.STATIC_ROOT_DEBUG,
                            "graphs"
                        )
                    ),
                    database=True
                )
            ),
            (
                # effective graph root depends on the DEBUG flag
                "GRAPH_ROOT",
                configfile.StringConfigVar(
                    os.path.abspath(
                        os.path.join(
                            settings.STATIC_ROOT_DEBUG if global_config["DEBUG"] else settings.STATIC_ROOT,
                            "graphs"
                        )
                    ),
                    database=True
                )
            ),
        ]
    )
    # migrate a legacy socket path into the rrdcached directory
    if global_config["RRD_CACHED_SOCKET"] == "/var/run/rrdcached.sock":
        global_config["RRD_CACHED_SOCKET"] = os.path.join(global_config["RRD_CACHED_DIR"], "rrdcached.sock")
    self.CC.log_config()
    # re-insert config
    self.CC.re_insert_config()
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    # graph rendering and stale-data workers
    self.add_process(GraphProcess("graph"), start=True)
    self.add_process(GraphStaleProcess("stale"), start=True)
    db_tools.close_connection()
    self._init_network_sockets()
    DataStore.setup(self)
    self._db_debug = threading_tools.DBDebugBase(self.log, force_disable=False)
def main(options=None):
    """Register command-line driven config entries (falling back to empty
    defaults when no options namespace is given), then run the server code.

    :param options: optional argparse-style namespace with COMMAND,
        BACKUP_DATABASE, OPTION_KEYS and SHOW_RESULT attributes
    :return: always 0
    """
    have_opts = options is not None
    entry_list = [
        ("COMMAND", configfile.StringConfigVar(options.COMMAND if have_opts else "")),
        ("BACKUP_DATABASE", configfile.BoolConfigVar(options.BACKUP_DATABASE if have_opts else False)),
        ("OPTION_KEYS", configfile.ArrayConfigVar(options.OPTION_KEYS if have_opts else [])),
        ("SHOW_RESULT", configfile.BoolConfigVar(options.SHOW_RESULT if have_opts else False)),
    ]
    global_config.add_config_entries(entry_list)
    run_code()
    return 0
def main(options=None):
    """host-monitoring entry point; when invoked as 'collclient', copy the
    relevant command-line options into the global configuration first.

    :param options: argparse-style namespace (only used for collclient)
    :return: exit status of run_code()
    """
    global_config = configfile.get_global_config(
        process_tools.get_programm_name(),
        single_process_mode=True,
    )
    prog_name = global_config.name()
    if prog_name == "collclient":
        client_entries = [
            ("IDENTITY_STRING", configfile.StringConfigVar(options.IDENTITY_STRING)),
            ("TIMEOUT", configfile.IntegerConfigVar(options.TIMEOUT)),
            ("COMMAND_PORT", configfile.IntegerConfigVar(options.COMMAND_PORT)),
            ("HOST", configfile.StringConfigVar(options.HOST)),
            ("ARGUMENTS", configfile.ArrayConfigVar(options.ARGUMENTS)),
        ]
        global_config.add_config_entries(client_entries)
    return run_code(prog_name, global_config)
def build_main(opt_ns):
    """Image-build entry point: map the parsed option namespace into the
    global configuration and run the build server loop.

    :param opt_ns: argparse namespace with verbose/ignore_errors/override/
        clear_lock/set_lock/skip_cleanup/image attributes
    :return: result of ServerProcess().loop()
    """
    build_entries = [
        ("VERBOSE", configfile.BoolConfigVar(opt_ns.verbose)),
        ("IGNORE_ERRORS", configfile.BoolConfigVar(opt_ns.ignore_errors)),
        ("LOG_DESTINATION", configfile.StringConfigVar(get_log_path(icswLogHandleTypes.log_py))),
        ("LOG_NAME", configfile.StringConfigVar("build_image")),
        # number of parallel builder processes
        ("BUILDERS", configfile.IntegerConfigVar(4)),
        ("OVERRIDE", configfile.BoolConfigVar(opt_ns.override)),
        ("CLEAR_LOCK", configfile.BoolConfigVar(opt_ns.clear_lock)),
        ("SET_LOCK", configfile.BoolConfigVar(opt_ns.set_lock)),
        ("SKIPCLEANUP", configfile.BoolConfigVar(opt_ns.skip_cleanup)),
        ("CHECK_SIZE", configfile.BoolConfigVar(True)),
        ("IMAGE_NAME", configfile.StringConfigVar(opt_ns.image)),
    ]
    global_config.add_config_entries(build_entries)
    return ServerProcess().loop()
def main():
    """Entry point: register the memcache address default and run the server code."""
    memcache_entry = (
        "MEMCACHE_ADDRESS",
        configfile.StringConfigVar("127.0.0.1", help_string="memcache address"),
    )
    global_config.add_config_entries([memcache_entry])
    # enable connection debugging
    run_code()
    # exit
    os._exit(0)
def __init__(self):
    """Set up the config-server process pool.

    Reads TFTP-related settings from the DB, derives the config/image/kernel
    directories below TFTP_DIR, then starts the build worker and binds the
    network sockets.
    """
    threading_tools.icswProcessPool.__init__(self, "main")
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.CC.init(icswServiceEnum.config_server, global_config)
    self.CC.check_config()
    self.CC.read_config_from_db([
        ("TFTP_DIR", configfile.StringConfigVar("/tftpboot")),
        # command port of the host-monitoring instance
        ("MONITORING_PORT", configfile.IntegerConfigVar(
            InstanceXML(quiet=True).get_port_dict(
                icswServiceEnum.host_monitoring, command=True))),
        ("LOCALHOST_IS_EXCLUSIVE", configfile.BoolConfigVar(True)),
        # host cache lifetime in seconds (10 minutes)
        ("HOST_CACHE_TIME", configfile.IntegerConfigVar(10 * 60)),
        ("WRITE_REDHAT_HWADDR_ENTRY", configfile.BoolConfigVar(True)),
        ("ADD_NETDEVICE_LINKS", configfile.BoolConfigVar(False)),
    ])
    # derived directories below TFTP_DIR (must run after read_config_from_db)
    global_config.add_config_entries([
        ("CONFIG_DIR", configfile.StringConfigVar(
            os.path.join(global_config["TFTP_DIR"], "config"))),
        ("IMAGE_DIR", configfile.StringConfigVar(
            os.path.join(global_config["TFTP_DIR"], "images"))),
        ("KERNEL_DIR", configfile.StringConfigVar(
            os.path.join(global_config["TFTP_DIR"], "kernels"))),
    ])
    self.__pid_name = global_config["PID_NAME"]
    # close DB connection (daemonize)
    db_tools.close_connection()
    self.CC.re_insert_config()
    self._log_config()
    self._init_subsys()
    self._init_network_sockets()
    self.add_process(BuildProcess("build"), start=True)
    db_tools.close_connection()
    self.register_func("client_update", self._client_update)
    self.register_func("complex_result", self._complex_result)
    # running index / buffer for pending inter-process commands
    self.__run_idx = 0
    self.__pending_commands = {}
def main():
    """Entry point: register startup defaults, run the server code, exit."""
    startup_entries = [
        ("INITIAL_CONFIG_RUN", configfile.BoolConfigVar(False, help_string="make a config build run on startup [%(default)s]")),
        ("MEMCACHE_ADDRESS", configfile.StringConfigVar("127.0.0.1", help_string="memcache address")),
    ]
    global_config.add_config_entries(startup_entries)
    run_code()
    # exit
    os._exit(0)
def main():
    """package-client entry point.

    Ensures /etc/packageserver exists (seeding it with "localhost"), reads the
    package server name from its first line, registers it together with the
    version string and a DEBIAN flag, then runs the client code.

    Fixes over the previous version: the process now exits with the error
    code (5) instead of always 0, file handles are closed via context
    managers, and the bare ``except:`` clauses are narrowed.
    """
    global_config = configfile.get_global_config(
        process_tools.get_programm_name(),
        single_process_mode=True,
    )
    ret_code = 0
    ps_file_name = "/etc/packageserver"
    if not os.path.isfile(ps_file_name):
        # seed the config file with a sane default
        try:
            with open(ps_file_name, "w") as ps_file:
                ps_file.write("localhost\n")
        except OSError:
            print("error writing to {}: {}".format(
                ps_file_name, process_tools.get_except_info()))
            ret_code = 5
    try:
        with open(ps_file_name, "r") as ps_file:
            # server name is the first non-blank-stripped line
            server_name = ps_file.read().strip().split("\n")[0].strip()
        global_config.add_config_entries([
            ("PACKAGE_SERVER", configfile.StringConfigVar(server_name)),
            ("VERSION", configfile.StringConfigVar(VERSION_STRING)),
        ])
    except Exception:
        print("error reading from {}: {}".format(
            ps_file_name, process_tools.get_except_info()))
        ret_code = 5
    if not ret_code:
        global_config.add_config_entries([
            ("DEBIAN", configfile.BoolConfigVar(os.path.isfile("/etc/debian_version")))
        ])
        run_code(global_config)
    # propagate the error state to the caller (was hard-wired to 0 before)
    os._exit(ret_code)
def main():
    """snmp-relay entry point: register defaults, forbid multiple instances,
    run the relay code and exit."""
    # read global configfile
    prog_name = global_config.name()
    relay_entries = [
        ("BASEDIR_NAME", configfile.StringConfigVar("/etc/sysconfig/snmp-relay.d")),
        ("SNMP_PROCESSES", configfile.IntegerConfigVar(4, help_string="number of SNMP processes [%(default)d]")),
        ("MAIN_TIMER", configfile.IntegerConfigVar(60, help_string="main timer [%(default)d]")),
        ("MAX_CALLS", configfile.IntegerConfigVar(100, help_string="number of calls per helper process [%(default)d]")),
        # pid file lives below a directory named after the program
        ("PID_NAME", configfile.StringConfigVar(os.path.join(prog_name, prog_name))),
    ]
    global_config.add_config_entries(relay_entries)
    process_tools.ALLOW_MULTIPLE_INSTANCES = False
    run_code()
    # exit
    os._exit(0)
def __init__(self):
    """Set up the meta-server process pool.

    Copies check/track intervals from the cluster store (CC.CS) into the
    global config, prepares mailing, memory bookkeeping and the service
    state machine, then arms the periodic check timer.
    """
    threading_tools.icswProcessPool.__init__(self, "main")
    self.CC.init(icswServiceEnum.meta_server, global_config, native_logging=True)
    self.CC.check_config()
    # mirror selected cluster-store keys into global_config
    self.CC.CS.copy_to_global_config(global_config, [
        ("meta.track.icsw.memory", "TRACK_CSW_MEMORY"),
        ("meta.check.time", "MIN_CHECK_TIME"),
        ("meta.check.memory.time", "MIN_MEMCHECK_TIME"),
    ])
    global_config.add_config_entries([
        ("STATE_DIR", configfile.StringConfigVar(os.path.join(
            self.CC.CS["meta.maindir"], ".srvstate"), source="dynamic")),
    ])
    self.__debug = global_config["DEBUG"]
    # check for correct rights
    self._check_dirs()
    self._init_network_sockets()
    self._init_inotify()
    self.register_exception("int_error", self._sigint)
    self.register_exception("term_error", self._sigint)
    # init stuff for mailing
    self.__new_mail = mail_tools.icswMail(
        None,
        "{}@{}".format(
            self.CC.CS["meta.mail.from.name"],
            process_tools.get_fqdn()[0],
        ),
        self.CC.CS["mail.target.address"],
    )
    self.__new_mail.set_server(self.CC.CS["mail.server"], self.CC.CS["mail.server"])
    # msi dict
    # backdate the timestamps so the first _check fires immediately
    self.__last_update_time = time.time() - 2 * global_config["MIN_CHECK_TIME"]
    self.__last_memcheck_time = time.time() - 2 * global_config["MIN_MEMCHECK_TIME"]
    self._init_meminfo()
    self.CC.log_config()
    self._init_statemachine()
    self.__next_stop_is_restart = False
    # wait for transactions if necessary
    self.__exit_process = False
    self.__transition_timer = False
    self.register_timer(self._check, 30, instant=True)
def VCM_check_md_version(self, global_config):
    """Detect type and version of the installed monitoring daemon.

    Parses MON_DAEMON_INFO_FILE (KEY=VALUE lines, values optionally quoted)
    and, on success, registers MD_TYPE / MD_VERSION* / MD_BASEDIR /
    MAIN_CONFIG_NAME / MD_LOCK_FILE in ``global_config``.  When the file is
    absent or unreadable, nothing is registered (callers check for MD_TYPE).

    Fixes over the previous version: file handle closed via context manager,
    bare ``except:`` narrowed, and ``split("=", 1)`` so values that contain
    an '=' no longer abort the parse.

    :param global_config: configuration object receiving the MD_* entries
    """
    start_time = time.time()
    _info_file = MON_DAEMON_INFO_FILE
    self.log(
        "checking type and version of installed monitoring daemon via file {}".format(_info_file)
    )
    if os.path.isfile(_info_file):
        try:
            with open(_info_file, "r") as info_file:
                _content = {
                    _key.strip(): _value.strip().replace("\"", "") for _key, _value in [
                        # maxsplit=1: only the first '=' separates key and value
                        _line.split("=", 1) for _line in info_file.read().split("\n")
                        if _line.count("=")
                    ]
                }
        except Exception:
            self.log(
                "error reading from {}: {}".format(
                    _info_file, process_tools.get_except_info()),
                logging_tools.LOG_LEVEL_ERROR)
        else:
            # NOTE(review): assumes MON_TYPE and MON_VERSION keys are present
            # and MON_VERSION has the form <version>-<release>; a malformed
            # file would raise here (unchanged behaviour)
            md_type = _content["MON_TYPE"].lower()
            md_versrel = _content["MON_VERSION"].lower()
            md_version, md_release = md_versrel.split("-", 1)
            global_config.add_config_entries([
                ("MD_TYPE", configfile.StringConfigVar(md_type)),
                ("MD_VERSION_STRING", configfile.StringConfigVar(md_versrel)),
                ("MD_VERSION", configfile.StringConfigVar(md_version)),
                ("MD_RELEASE", configfile.StringConfigVar(md_release)),
                ("MD_BASEDIR", configfile.StringConfigVar(
                    os.path.join("/opt", "cluster", md_type))),
                ("MAIN_CONFIG_NAME", configfile.StringConfigVar(md_type)),
                ("MD_LOCK_FILE", configfile.StringConfigVar("{}.lock".format(md_type))),
            ])
            self.log("Discovered installed monitor-daemon {}, version {}".format(
                md_type, md_version))
    end_time = time.time()
    self.log("monitor-daemon version discovery took {}".format(
        logging_tools.get_diff_time_str(end_time - start_time)))
def __init__(self):
    """Set up the logcheck-server process pool.

    Reads log-retention settings from the DB, prepares the syslog
    directories, enables the syslog configuration, binds the network
    sockets and arms the periodic sync/rotate timers.

    Fix: removed the ``KEEP_LOGS_TOTDDAL`` entry — a typo'd duplicate of
    ``KEEP_LOGS_TOTAL`` (same default, 30) that was re-inserted into the
    database on every start.
    """
    threading_tools.icswProcessPool.__init__(self, "main")
    self.CC.init(icswServiceEnum.logcheck_server, global_config)
    self.CC.check_config()
    self.CC.read_config_from_db([
        ("SYSLOG_DIR", configfile.StringConfigVar("/var/log/hosts")),
        # retention in days
        ("KEEP_LOGS_UNCOMPRESSED", configfile.IntegerConfigVar(2)),
        ("KEEP_LOGS_TOTAL", configfile.IntegerConfigVar(30)),
        # maximum time in days to track logs
        ("LOGS_TRACKING_DAYS", configfile.IntegerConfigVar(
            4, help_string="time to track logs in days")),
        # cachesize for lineinfo (per file)
        ("LINECACHE_ENTRIES_PER_FILE", configfile.IntegerConfigVar(
            50, help_string="line cache per file")),
    ])
    # close connection (daemonizing)
    db_tools.close_connection()
    self.srv_helper = service_tools.ServiceHelper(self.log)
    self.CC.re_insert_config()
    self.register_exception("hup_error", self._hup_error)
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    # log config
    self.CC.log_config()
    # prepare directories
    self._prepare_directories()
    # enable syslog_config
    self._enable_syslog_config()
    # network bind
    self._init_network_sockets()
    Machine.setup(self)
    self.my_scanner = LogcheckScanner(self)
    # watch the inotify fd of the machine tree
    self.register_poller(Machine.get_watcher()._fd, zmq.POLLIN, Machine.inotify_event)
    # hourly machine sync, half-daily log rotation
    self.register_timer(self.sync_machines, 3600, instant=True)
    self.register_timer(self.rotate_logs, 3600 * 12, instant=True)
def __init__(self):
    """Set up the report-server process pool and its single report
    generation worker; jobs are queued in ``_job_queue`` and handed to the
    worker one at a time (``_wait_result`` gates the hand-off)."""
    threading_tools.icswProcessPool.__init__(self, "main")
    self.CC.init(icswServiceEnum.report_server, global_config)
    self.CC.check_config()
    self.CC.log_config()
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    # close DB connection (daemonizing)
    db_tools.close_connection()
    global_config.add_config_entries(
        [
            ("REPORT_BASE_PATH", configfile.StringConfigVar("/opt/cluster/share/reports"))
        ]
    )
    self.CC.re_insert_config()
    self._init_network_sockets()
    self.add_process(
        ReportGenerationProcess("report-generation"),
        start=True
    )
    self.register_func("report_finished", self._report_finished)
    # pending report jobs / flag: worker busy, waiting for its result
    self._job_queue = []
    self._wait_result = False
def main():
    """bonnie benchmark driver: parse command-line options into a local
    configuration, validate user/group/thread settings, optionally
    daemonize, then run the benchmark thread pool.

    Exits 2 on a getopt error, 1 on invalid directory/user/group/threads,
    0 after printing help or on a malformed thread spec.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "u:g:d:ht:n:s:Sr:", ["help", "daemon", "sync-local", "sync-global"])
    except getopt.GetoptError as bla:
        print("Commandline error : %s" % (process_tools.get_except_info()))
        sys.exit(2)
    pname = os.path.basename(sys.argv[0])
    # default to the invoking user/group
    user_name = pwd.getpwuid(os.getuid())[0]
    group_name = grp.getgrgid(os.getgid())[0]
    loc_config = configfile.configuration(
        "bonnie",
        {
            "USER": configfile.StringConfigVar(user_name),
            "GROUP": configfile.StringConfigVar(group_name),
            "TMP_DIR": configfile.StringConfigVar("/tmp"),
            "UID": configfile.IntegerConfigVar(0),
            "GID": configfile.IntegerConfigVar(0),
            "SET_RAM_SIZE": configfile.BoolConfigVar(False),
            "RAM_SIZE": configfile.IntegerConfigVar(0),
            "DAEMONIZE": configfile.BoolConfigVar(False),
            "WAIT_SEMAPHORE": configfile.BoolConfigVar(True),
            "SYNC_GLOBAL": configfile.BoolConfigVar(False),
            "SYNC_LOCAL": configfile.BoolConfigVar(False),
            "NUM_TESTS": configfile.IntegerConfigVar(1),
            # benchmark size in MB
            "BONNIE_SIZE": configfile.IntegerConfigVar(1024),
            "LOG_NAME": configfile.StringConfigVar("bonnie"),
            "LOG_DESTINATION": configfile.StringConfigVar("uds:/var/lib/logging-server/py_log"),
            "THREADS": configfile.StringConfigVar("1"),
            # result file name carries a sanitized timestamp
            "RESULT_FILE": configfile.StringConfigVar("/tmp/bonnie_res_%s" % (time.ctime().replace(" ", "_").replace("__", "_")))
        }
    )
    for opt, arg in opts:
        if opt in ["-h", "--help"]:
            print("Usage: %s [OPTIONS]" % (pname))
            print("where OPTIONS are:")
            print(" -h,--help this help")
            print(" -d DIR sets scratch directory, default is %s" % (loc_config["TMP_DIR"]))
            print(" -u user run as user USER, default is %s" % (loc_config["USER"]))
            print(" -g group run as group GROUP, default is %s" % (loc_config["GROUP"]))
            print(" -t THREADS set threads to start, <NUM>[:<NUM>[:<NUM>]], default is %s" % (loc_config["THREADS"]))
            print(" -n NUM sets number of tests, default is %d" % (loc_config["NUM_TESTS"]))
            print(" -s SIZE size, defaults to %d MB" % (loc_config["BONNIE_SIZE"]))
            print(" -r RAM RAM size to set, as default the RAM-Size will be discovered automatically")
            print(" -S do not wait for semaphore")
            print(" --daemon daemonize")
            print(" --sync-local sync before and after every bonnie run (thread-local)")
            print(" --sync-global sync before and after every bonnie run (thread-global)")
            sys.exit(0)
        if opt == "-u":
            loc_config["USER"] = arg
        if opt == "-g":
            loc_config["GROUP"] = arg
        if opt == "-d":
            loc_config["TMP_DIR"] = arg
        if opt == "--daemon":
            loc_config["DAEMONIZE"] = True
        if opt == "-t":
            loc_config["THREADS"] = arg
        if opt == "-n":
            loc_config["NUM_TESTS"] = int(arg)
        if opt == "-s":
            loc_config["BONNIE_SIZE"] = int(arg)
        if opt == "-S":
            loc_config["WAIT_SEMAPHORE"] = False
        if opt == "--sync-local":
            loc_config["SYNC_LOCAL"] = True
        if opt == "--sync-global":
            loc_config["SYNC_GLOBAL"] = True
        if opt == "-r":
            loc_config["SET_RAM_SIZE"] = True
            loc_config["RAM_SIZE"] = int(arg)
    print("Results will be written to %s" % (loc_config["RESULT_FILE"]))
    # check options
    if not os.path.isdir(loc_config["TMP_DIR"]):
        print("tmp_dir %s is no directory, exiting ..." % (loc_config["TMP_DIR"]))
        sys.exit(1)
    # resolve user/group names to numeric ids
    try:
        loc_config["UID"] = pwd.getpwnam(loc_config["USER"])[2]
    except:
        print("cannot get uid: %s" % (process_tools.get_except_info()))
        sys.exit(1)
    try:
        loc_config["GID"] = grp.getgrnam(loc_config["GROUP"])[2]
    except:
        print("cannot get gid: %s" % (process_tools.get_except_info()))
        sys.exit(1)
    # thread spec must be colon-separated integers
    if loc_config["THREADS"]:
        if [True for x in loc_config["THREADS"].split(":") if not x.isdigit()]:
            print("Wrong thread_info %s, exiting ..." % (loc_config["THREADS"]))
            sys.exit(0)
    else:
        print("empty thread_info, exiting ...")
        sys.exit(1)
    logger = logging_tools.get_logger(
        loc_config["LOG_NAME"],
        loc_config["LOG_DESTINATION"],
    )
    if loc_config["DAEMONIZE"]:
        process_tools.become_daemon()
        # deprecated call
        hc_ok = process_tools.set_handles(
            {
                "out": (1, "bonnie.out"),
                # deprecated code
                "err": (0, "/var/lib/logging-server/py_err"),
                "strict": 0
            }
        )
    thread_pool = server_thread_pool(logger, loc_config)
    thread_pool.thread_loop()
    logger.info("CLOSE")
def __init__(self):
    """Set up the cluster-server process pool.

    Supports two modes: direct single-command execution (COMMAND set in the
    global config) and normal server mode; server mode additionally starts
    the capability/license workers and the network sockets.
    """
    threading_tools.icswProcessPool.__init__(self, "main")
    long_host_name, mach_name = process_tools.get_fqdn()
    # direct-command mode when a COMMAND was given on the command line
    self.__run_command = True if global_config["COMMAND"].strip() else False
    # rewrite LOG_NAME if necessary
    if self.__run_command:
        self.CC.init(
            icswServiceEnum.cluster_server,
            global_config,
            init_msi_block=False,
            log_name_postfix="direct-{}".format(global_config["COMMAND"]),
        )
    else:
        self.CC.init(
            icswServiceEnum.cluster_server,
            global_config,
        )
    self.CC.check_config()
    # close DB conncetion (daemonize)
    if not self.__run_command:
        # create hardware fingerprint
        self.CC.create_hfp()
        # enable memcache backend
        db_tools.close_connection()
    self.CC.read_config_from_db(
        [
            ("IMAGE_SOURCE_DIR", configfile.StringConfigVar("/opt/cluster/system/images")),
            ("MAILSERVER", configfile.StringConfigVar("localhost")),
            ("FROM_NAME", configfile.StringConfigVar("quotawarning")),
            ("FROM_ADDR", configfile.StringConfigVar(long_host_name)),
            ("VERSION", configfile.StringConfigVar(VERSION_STRING, database=False)),
            ("QUOTA_ADMINS", configfile.StringConfigVar("*****@*****.**")),
            ("MONITOR_QUOTA_USAGE", configfile.BoolConfigVar(False, help_string="enabled quota usage tracking")),
            ("TRACK_ALL_QUOTAS", configfile.BoolConfigVar(False, help_string="also track quotas without limit")),
            ("QUOTA_CHECK_TIME_SECS", configfile.IntegerConfigVar(3600)),
            ("USER_MAIL_SEND_TIME", configfile.IntegerConfigVar(3600, help_string="time in seconds between two mails")),
            ("SERVER_FULL_NAME", configfile.StringConfigVar(long_host_name, database=False)),
            ("SERVER_SHORT_NAME", configfile.StringConfigVar(mach_name, database=False)),
            ("DATABASE_DUMP_DIR", configfile.StringConfigVar("/opt/cluster/share/db_backup")),
            ("DATABASE_KEEP_DAYS", configfile.IntegerConfigVar(30)),
            ("USER_SCAN_TIMER", configfile.IntegerConfigVar(7200, help_string="time in seconds between two user_scan runs")),
            ("NEED_ALL_NETWORK_BINDS", configfile.BoolConfigVar(True, help_string="raise an error if not all bind() calls are successfull")),
        ]
    )
    if not self.__run_command:
        self.CC.re_insert_config()
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_func("bg_finished", self._bg_finished)
    self._log_config()
    self._check_uuid()
    self._load_modules()
    self._set_next_backup_time(True)
    if self.__run_command:
        # direct mode: just schedule the single command
        self.register_timer(self._run_command, 3600, instant=True)
    else:
        # server mode: bind sockets and start the worker processes
        self._init_network_sockets()
        if not self["exit_requested"]:
            self.init_notify_framework(global_config)
            self.add_process(CapabilityProcess("capability_process"), start=True)
            self.add_process(LicenseChecker("license_checker"), start=True)
            db_tools.close_connection()
            self.register_timer(
                self._update,
                2 if global_config["DEBUG"] else 30,
                instant=True
            )
def __init__(self):
    """Set up the rms-server process pool.

    Discovers SGE_ROOT / SGE_CELL from /etc files and the SGE arch via the
    util/arch helper, reads scheduler/accounting settings from the DB, then
    starts the monitoring, accounting and license worker processes.
    """
    threading_tools.icswProcessPool.__init__(
        self,
        "main",
    )
    self.CC.init(icswServiceEnum.rms_server, global_config)
    self.CC.check_config()
    db_tools.close_connection()
    sge_dict = {}
    _all_ok = True
    # NOTE(review): v_default is currently unused — missing files yield ""
    for v_name, v_src, v_default in [
        ("SGE_ROOT", "/etc/sge_root", "/opt/sge"),
        ("SGE_CELL", "/etc/sge_cell", "default")
    ]:
        if os.path.isfile(v_src):
            sge_dict[v_name] = open(v_src, "r").read().strip()
        else:
            _all_ok = False
            sge_dict[v_name] = ""
    if _all_ok:
        # query the SGE architecture via the bundled helper script
        stat, sge_dict["SGE_ARCH"], _log_lines = call_command(
            "/{}/util/arch".format(sge_dict["SGE_ROOT"]))
        if stat:
            sge_dict["SGE_ARCH"] = ""
    else:
        sge_dict["SGE_ARCH"] = ""
    self.CC.read_config_from_db([
        ("CHECK_ITERATIONS", configfile.IntegerConfigVar(3)),
        ("RETRY_AFTER_CONNECTION_PROBLEMS", configfile.IntegerConfigVar(0)),
        ("FROM_ADDR", configfile.StringConfigVar("rms_server")),
        ("TO_ADDR", configfile.StringConfigVar("*****@*****.**")),
        ("SGE_ARCH", configfile.StringConfigVar(sge_dict["SGE_ARCH"])),
        ("SGE_ROOT", configfile.StringConfigVar(sge_dict["SGE_ROOT"])),
        ("SGE_CELL", configfile.StringConfigVar(sge_dict["SGE_CELL"])),
        ("FAIRSHARE_TREE_NODE_TEMPLATE", configfile.StringConfigVar("/{project}/{user}")),
        ("FAIRSHARE_TREE_DEFAULT_SHARES", configfile.IntegerConfigVar(1000)),
        ("TRACE_FAIRSHARE", configfile.BoolConfigVar(False)),
        ("CLEAR_ITERATIONS", configfile.IntegerConfigVar(1)),
        ("CHECK_ACCOUNTING_TIMEOUT", configfile.IntegerConfigVar(300)),
        ("LICENSE_BASE", configfile.StringConfigVar("/etc/sysconfig/licenses")),
        ("TRACK_LICENSES", configfile.BoolConfigVar(False)),
        ("TRACK_LICENSES_IN_DB", configfile.BoolConfigVar(False)),
        ("MODIFY_SGE_GLOBAL", configfile.BoolConfigVar(False)),
    ],
    )
    # check modify_sge_global flag and set filesystem flag accordingly
    sge_license_tools.handle_license_policy(
        global_config["LICENSE_BASE"], global_config["MODIFY_SGE_GLOBAL"])
    # re-insert config
    self.CC.re_insert_config()
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    self.register_func("job_ended", self._job_ended)
    self._log_config()
    # dc.release()
    self._init_network_sockets()
    # worker processes: queue monitoring, accounting parser, license tracker
    self.add_process(RMSMonProcess("rms_mon"), start=True)
    self.add_process(AccountingProcess("accounting"), start=True)
    self.add_process(LicenseProcess("license"), start=True)
def __init__(self):
    """Set up the monitor-server (md-config) process pool.

    Reads the (large) monitoring configuration from the DB, discovers the
    installed monitoring daemon via VCM_check_md_version() and — only when
    one was found (MD_TYPE set) — starts the syncer/dynconfig/log-reader/KPI
    workers; otherwise the pool shuts down with an error.
    """
    long_host_name, mach_name = process_tools.get_fqdn()
    threading_tools.icswProcessPool.__init__(self, "main")
    self.CC.init(icswServiceEnum.monitor_server, global_config)
    self.CC.check_config()
    db_tools.close_connection()
    self.CC.read_config_from_db([
        ("NETSPEED_WARN_MULT", configfile.FloatConfigVar(0.85)),
        ("NETSPEED_CRITICAL_MULT", configfile.FloatConfigVar(0.95)),
        ("NETSPEED_DEFAULT_VALUE", configfile.IntegerConfigVar(10000000)),
        ("CHECK_HOST_ALIVE_PINGS", configfile.IntegerConfigVar(5)),
        ("CHECK_HOST_ALIVE_TIMEOUT", configfile.FloatConfigVar(5.0)),
        ("ENABLE_COLLECTD", configfile.BoolConfigVar(False)),
        ("ENABLE_NAGVIS", configfile.BoolConfigVar(False)),
        ("ENABLE_FLAP_DETECTION", configfile.BoolConfigVar(False)),
        ("NAGVIS_DIR", configfile.StringConfigVar("/opt/nagvis4icinga")),
        ("NAGVIS_URL", configfile.StringConfigVar("/nagvis")),
        ("NONE_CONTACT_GROUP", configfile.StringConfigVar("none_group")),
        ("FROM_ADDR", configfile.StringConfigVar(long_host_name)),
        ("LOG_EXTERNAL_COMMANDS", configfile.BoolConfigVar(False)),
        ("LOG_PASSIVE_CHECKS", configfile.BoolConfigVar(False)),
        ("BUILD_CONFIG_ON_STARTUP", configfile.BoolConfigVar(True)),
        ("RELOAD_ON_STARTUP", configfile.BoolConfigVar(True)),
        ("RETAIN_HOST_STATUS", configfile.BoolConfigVar(True)),
        ("RETAIN_SERVICE_STATUS", configfile.BoolConfigVar(True)),
        ("PASSIVE_HOST_CHECKS_ARE_SOFT", configfile.BoolConfigVar(True)),
        ("RETAIN_PROGRAM_STATE", configfile.BoolConfigVar(False)),
        ("USE_HOST_DEPENDENCIES", configfile.BoolConfigVar(False)),
        ("USE_SERVICE_DEPENDENCIES", configfile.BoolConfigVar(False)),
        ("TRANSLATE_PASSIVE_HOST_CHECKS", configfile.BoolConfigVar(True)),
        ("USE_ONLY_ALIAS_FOR_ALIAS", configfile.BoolConfigVar(False)),
        ("HOST_DEPENDENCIES_FROM_TOPOLOGY", configfile.BoolConfigVar(False)),
        # timeouts in seconds for collectd / SNMP client calls
        ("CCOLLCLIENT_TIMEOUT", configfile.IntegerConfigVar(10)),
        ("CSNMPCLIENT_TIMEOUT", configfile.IntegerConfigVar(20)),
        ("MAX_SERVICE_CHECK_SPREAD", configfile.IntegerConfigVar(5)),
        ("MAX_HOST_CHECK_SPREAD", configfile.IntegerConfigVar(5)),
        ("MAX_CONCURRENT_CHECKS", configfile.IntegerConfigVar(500)),
        ("CHECK_SERVICE_FRESHNESS", configfile.BoolConfigVar(
            True, help_string="enable service freshness checking")),
        ("CHECK_HOST_FRESHNESS", configfile.BoolConfigVar(
            True, help_string="enable host freshness checking")),
        ("SAFE_CC_NAME", configfile.BoolConfigVar(False)),
        ("SERVICE_FRESHNESS_CHECK_INTERVAL", configfile.IntegerConfigVar(60)),
        ("HOST_FRESHNESS_CHECK_INTERVAL", configfile.IntegerConfigVar(60)),
        ("SAFE_NAMES", configfile.BoolConfigVar(
            False,
            help_string="convert all command descriptions to safe names (without spaces), [%(default)s]"
        )),
        ("ENABLE_ICINGA_LOG_PARSING", configfile.BoolConfigVar(
            True,
            help_string="collect icinga logs in the database (required for status history and kpis)"
        )),
    ])
    # copy flags
    self.__verbose = global_config["VERBOSE"]
    # log config
    self.CC.log_config()
    # re-insert config
    self.CC.re_insert_config()
    # init build control
    self.BC = BuildControl(self)
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    self._check_notification()
    # sync master uuid
    self.__sync_master_uuid = None
    # check special command
    check_special_commands(self.log)
    # from mixins
    self.VCM_check_md_version(global_config)
    self._init_network_sockets()
    if "MD_TYPE" in global_config:
        # a monitoring daemon is installed: wire up the IPC handlers
        self.register_func("register_remote", self._register_remote)
        self.register_func("send_command", self._send_command)
        self.register_func("ocsp_results", self._ocsp_results)
        self.register_func("set_sync_master_uuid", self._set_sync_master_uuid)
        self.register_func("distribution_info", self._distribution_info)
        self.register_func("build_step", self.BC.build_step)
        self.add_process(SyncerProcess("syncer"), start=True)
        self.add_process(DynConfigProcess("dynconfig"), start=True)
        self.add_process(IcingaLogReader("IcingaLogReader"), start=True)
        self.add_process(KpiProcess("KpiProcess"), start=True)
        # wait for the processes to start
        time.sleep(0.5)
        self.register_timer(self._check_for_redistribute, 60 if global_config["DEBUG"] else 300)
        # only test code
        # self.send_to_remote_server(
        #     "cluster-server",
        #     unicode(server_command.srv_command(command="statusd")),
        # )
    else:
        self.log("MD_TYPE not defined in global_config, exiting...", logging_tools.LOG_LEVEL_ERROR)
        self._int_error("no MD found")
def __init__(self):
    """Set up the mother-server (boot/node control) process pool.

    Reads TFTP/DHCP/NFS settings from the DB, derives directories below
    TFTP_DIR, prepares netboot/syslog/DHCP configuration and — if the
    network sockets bind successfully — starts the kernel/command/control/
    icmp/snmp worker processes and sends the initial commands.
    """
    _long_host_name, mach_name = process_tools.get_fqdn()
    threading_tools.icswProcessPool.__init__(self, "main")
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.CC.init(icswServiceEnum.mother_server, global_config)
    self.CC.check_config()
    # close db connection (for daemonizing)
    db_tools.close_connection()
    self.debug = global_config["DEBUG"]
    self.srv_helper = service_tools.ServiceHelper(self.log)
    # command ports of the hoststatus / host-monitoring instances
    self.__hs_port = InstanceXML(quiet=True).get_port_dict(
        icswServiceEnum.hoststatus, command=True)
    self.__hm_port = InstanceXML(quiet=True).get_port_dict(
        icswServiceEnum.host_monitoring, command=True)
    # log config
    self.CC.read_config_from_db([
        ("TFTP_LINK", configfile.StringConfigVar("/tftpboot")),
        ("TFTP_DIR", configfile.StringConfigVar(
            os.path.join(CLUSTER_DIR, "system", "tftpboot"))),
        ("CLUSTER_DIR", configfile.StringConfigVar(CLUSTER_DIR)),
        # in 10th of seconds
        ("NODE_BOOT_DELAY", configfile.IntegerConfigVar(50)),
        ("FANCY_PXE_INFO", configfile.BoolConfigVar(False)),
        ("SERVER_SHORT_NAME", configfile.StringConfigVar(mach_name)),
        ("WRITE_DHCP_CONFIG", configfile.BoolConfigVar(True)),
        ("DHCP_AUTHORITATIVE", configfile.BoolConfigVar(False)),
        ("DHCP_ONLY_BOOT_NETWORKS", configfile.BoolConfigVar(True)),
        ("MODIFY_NFS_CONFIG", configfile.BoolConfigVar(True)),
        ("NEED_ALL_NETWORK_BINDS", configfile.BoolConfigVar(True)),
    ])
    # derived directories (must run after read_config_from_db)
    global_config.add_config_entries([
        ("CONFIG_DIR", configfile.StringConfigVar(
            os.path.join(global_config["TFTP_DIR"], "config"))),
        ("ETHERBOOT_DIR", configfile.StringConfigVar(
            os.path.join(global_config["TFTP_DIR"], "etherboot"))),
        ("KERNEL_DIR", configfile.StringConfigVar(
            os.path.join(global_config["TFTP_DIR"], "kernels"))),
        ("SHARE_DIR", configfile.StringConfigVar(
            os.path.join(global_config["CLUSTER_DIR"], "share", "mother"))),
        ("NODE_SOURCE_IDX", configfile.IntegerConfigVar(LogSource.new("node").pk)),
    ])
    self.CC.log_config()
    self.CC.re_insert_config()
    # prepare directories
    self._prepare_directories()
    # check netboot functionality
    self._check_netboot_functionality()
    # check nfs exports
    self._check_nfs_exports()
    # modify syslog config
    self._enable_syslog_config()
    # dhcp config
    self.write_dhcp_config()
    # check status entries
    self._check_status_entries()
    self.register_func("contact_hoststatus", self._contact_hoststatus)
    self.register_func("contact_hostmonitor", self._contact_hostmonitor)
    my_uuid = uuid_tools.get_uuid()
    self.log("cluster_device_uuid is '{}'".format(my_uuid.urn))
    if self._init_network_sockets():
        self.add_process(initat.mother.kernel.KernelSyncProcess("kernel"), start=True)
        self.add_process(
            initat.mother.command.ExternalCommandProcess("command"), start=True)
        self.add_process(
            initat.mother.control.NodeControlProcess("control"), start=True)
        self.add_process(initat.mother.control.ICMPProcess("icmp"), start=True)
        db_tools.close_connection()
        # hand a minimal config dict to the SNMP subprocess
        conf_dict = {
            key: global_config[key] for key in ["LOG_NAME", "LOG_DESTINATION", "VERBOSE"]
        }
        self.add_process(SNMPProcess("snmp_process", conf_dict=conf_dict), start=True)
        # send initial commands
        self.send_to_process(
            "kernel",
            "srv_command",
            str(server_command.srv_command(command="check_kernel_dir", insert_all_found="1")))
        # restart hoststatus
        self.send_to_process("command", "delay_command", "/etc/init.d/hoststatus restart", delay_time=5)
        self.send_to_process("control", "refresh", refresh=False)
    else:
        self._int_error("bind problem")