def __init__(self):
    """Main process of the discovery server.

    Sets up the zmq-enabled process pool, reads the service configuration
    from the database, starts the worker sub-processes and wires up the
    remote-call handlers.
    """
    threading_tools.process_pool.__init__(self, "main", zmq=True)
    # both SIGINT and SIGTERM funnel into the same shutdown handler
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.CC.init(icswServiceEnum.discovery_server, global_config)
    self.CC.check_config()
    # close connection (daemonize)
    db_tools.close_connection()
    self.CC.read_config_from_db(
        [
            (
                "SNMP_PROCESSES",
                configfile.int_c_var(4, help_string="number of SNMP processes [%(default)d]", short_options="n")
            ),
            (
                "MAX_CALLS",
                configfile.int_c_var(100, help_string="number of calls per helper process [%(default)d]")
            ),
        ]
    )
    self.CC.re_insert_config()
    self.CC.log_config()
    # worker processes: SNMP discovery, WMI event-log polling, asset generation
    self.add_process(DiscoveryProcess("discovery"), start=True)
    self.add_process(EventLogPollerProcess(EventLogPollerProcess.PROCESS_NAME), start=True)
    self.add_process(GenerateAssetsProcess("generate_assets"), start=True)
    self._init_network_sockets()
    # functions callable from the sub-processes
    self.register_func("snmp_run", self._snmp_run)
    self.register_func("generate_assets", self._generate_assets)
    self.register_func(
        "process_assets_finished",
        self._process_assets_finished,
    )
    self.register_func(
        "process_batch_assets_finished",
        self._process_batch_assets_finished,
    )
    self.register_func("send_msg", self.send_msg)
    db_tools.close_connection()
    # in DEBUG mode throttle helper processes to 5 calls each
    self.__max_calls = global_config["MAX_CALLS"] if not global_config["DEBUG"] else 5
    self.__snmp_running = True
    self._init_processes()
    # not really necessary
    self.install_remote_call_handlers()
    # clear pending scans
    self.clear_pending_scans()
    self.__run_idx = 0
    self.__pending_commands = {}
    # developer-only hook, active on host "eddiex" in debug mode
    if process_tools.get_machine_name() == "eddiex" and global_config["DEBUG"]:
        self._test()
def main():
    """Register the command-line config entries and run the server loop.

    Does not return; the process is terminated via os._exit(0).
    """
    entry_list = [
        (
            "DEBUG",
            configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True),
        ),
        (
            "VERBOSE",
            configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True),
        ),
    ]
    global_config.add_config_entries(entry_list)
    server_process().loop()
    # hard exit, bypassing any atexit handlers
    os._exit(0)
def main():
    """Entry point for host-monitoring / collclient.

    Registers the command-line options (plus collclient-specific ones when
    running as "collclient"), parses the command line and hands over to
    run_code().

    Returns:
        int: exit state as returned by run_code().
    """
    prog_name = global_config.name()
    if COLLCLIENT:
        prog_name = "collclient"
    global_config.add_config_entries([
        ("DEBUG", configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True)),
        ("VERBOSE", configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True)),
    ])
    if prog_name == "collclient":
        global_config.add_config_entries([
            ("IDENTITY_STRING", configfile.str_c_var("collclient", help_string="identity string", short_options="i")),
            # BUGFIX: help_string was missing the closing bracket ("[%(default)d")
            ("TIMEOUT", configfile.int_c_var(10, help_string="set timeout [%(default)d]", only_commandline=True)),
            ("COMMAND_PORT", configfile.int_c_var(
                InstanceXML(quiet=True).get_port_dict("host-monitoring", command=True),
                info="listening Port",
                help_string="port to communicate [%(default)d]",
                short_options="p")),
            ("HOST", configfile.str_c_var("localhost", help_string="host to connect to")),
        ])
    # positional / partial parsing is only enabled for the client flavour
    options = global_config.handle_commandline(
        description="{}, version is {}".format(prog_name, VERSION_STRING),
        positional_arguments=prog_name in ["collclient"],
        partial=prog_name in ["collclient"],
    )
    ret_state = run_code(prog_name, global_config)
    return ret_state
def __init__(self):
    """Main process of the config server.

    Initialises the process pool, loads TFTP / monitoring related settings
    from the database, derives the directory layout below TFTP_DIR and
    starts the build sub-process.
    """
    threading_tools.process_pool.__init__(self, "main", zmq=True)
    # SIGINT and SIGTERM share one shutdown handler
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.CC.init(icswServiceEnum.config_server, global_config)
    self.CC.check_config()
    self.CC.read_config_from_db(
        [
            ("TFTP_DIR", configfile.str_c_var("/tftpboot")),
            # command port of the host-monitoring instance
            ("MONITORING_PORT", configfile.int_c_var(InstanceXML(quiet=True).get_port_dict("host-monitoring", command=True))),
            ("LOCALHOST_IS_EXCLUSIVE", configfile.bool_c_var(True)),
            # seconds; 10 minutes
            ("HOST_CACHE_TIME", configfile.int_c_var(10 * 60)),
            ("WRITE_REDHAT_HWADDR_ENTRY", configfile.bool_c_var(True)),
            ("ADD_NETDEVICE_LINKS", configfile.bool_c_var(False)),
        ]
    )
    # derived directories below TFTP_DIR
    global_config.add_config_entries(
        [
            ("CONFIG_DIR", configfile.str_c_var(os.path.join(global_config["TFTP_DIR"], "config"))),
            ("IMAGE_DIR", configfile.str_c_var(os.path.join(global_config["TFTP_DIR"], "images"))),
            ("KERNEL_DIR", configfile.str_c_var(os.path.join(global_config["TFTP_DIR"], "kernels"))),
        ]
    )
    self.__pid_name = global_config["PID_NAME"]
    # close DB connection (daemonize)
    db_tools.close_connection()
    self.CC.re_insert_config()
    self._log_config()
    self._init_subsys()
    self._init_network_sockets()
    self.add_process(build_process("build"), start=True)
    db_tools.close_connection()
    # functions callable from sub-processes
    self.register_func("client_update", self._client_update)
    self.register_func("complex_result", self._complex_result)
    self.__run_idx = 0
    self.__pending_commands = {}
def main():
    """Register config entries and execute the service code.

    Does not return; the process terminates via os._exit(0).
    """
    prog_name = global_config.name()
    entry_list = [
        (
            "DEBUG",
            configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True),
        ),
        (
            "VERBOSE",
            configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True),
        ),
        (
            "DELETE_MISSING_REPOS",
            configfile.bool_c_var(False, help_string="delete non-existing repos from DB"),
        ),
        (
            "SUPPORT_OLD_CLIENTS",
            configfile.bool_c_var(False, help_string="support old clients [%(default)s]", database=True),
        ),
    ]
    global_config.add_config_entries(entry_list)
    run_code()
    # hard exit, bypassing atexit handlers
    os._exit(0)
def main():
    """rrd-grapher entry point: register config entries and run the service.

    Does not return normally; exits via sys.exit(0).
    """
    global_config.add_config_entries([
        ("DEBUG", configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True)),
        # BUGFIX: removed a duplicate VERBOSE entry whose help_string carried the
        # typo "verbose lewel" and lacked the "-v" short option; the correct
        # definition below is kept
        ("SERVER_PATH", configfile.bool_c_var(
            False,
            help_string="set server_path to store RRDs [%(default)s]",
            only_commandline=True)),
        ("VERBOSE", configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True)),
        ("RRD_DIR", configfile.str_c_var(
            "/var/cache/rrd",
            help_string="directory of rrd-files on local disc",
            database=True)),
        ("RRD_CACHED_DIR", configfile.str_c_var("/var/run/rrdcached", database=True)),
        ("RRD_CACHED_SOCKET", configfile.str_c_var("/var/run/rrdcached/rrdcached.sock", database=True)),
        # NOTE(review): help_string "name of colortable file" looks copied from
        # elsewhere; the value is a base directory — confirm against callers
        ("GRAPHCONFIG_BASE", configfile.str_c_var("/opt/cluster/share/rrd_grapher/", help_string="name of colortable file")),
        ("COMPOUND_DIR", configfile.str_c_var("/opt/cluster/share/rrd_grapher/", help_string="include dir for compound XMLs")),
    ])
    run_code()
    sys.exit(0)
def main():
    """snmp-relay entry point.

    Reads the global config file, parses the command line and starts the
    relay code; terminates the process via os._exit(0).
    """
    # read global configfile
    prog_name = global_config.name()
    entry_list = [
        ("BASEDIR_NAME", configfile.str_c_var("/etc/sysconfig/snmp-relay.d")),
        ("DEBUG", configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True)),
        ("SNMP_PROCESSES", configfile.int_c_var(4, help_string="number of SNMP processes [%(default)d]", short_options="n")),
        ("MAIN_TIMER", configfile.int_c_var(60, help_string="main timer [%(default)d]")),
        ("LOG_NAME", configfile.str_c_var("snmp-relay")),
        ("LOG_DESTINATION", configfile.str_c_var("uds:/var/lib/logging-server/py_log_zmq")),
        ("MAX_CALLS", configfile.int_c_var(100, help_string="number of calls per helper process [%(default)d]")),
        ("VERBOSE", configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True)),
        ("PID_NAME", configfile.str_c_var(os.path.join(prog_name, prog_name))),
    ]
    global_config.add_config_entries(entry_list)
    _options = global_config.handle_commandline(
        positional_arguments=False,
        partial=False,
    )
    # make sure only a single relay instance is running
    process_tools.ALLOW_MULTIPLE_INSTANCES = False
    run_code()
    # exit without running atexit handlers
    os._exit(0)
def main():
    """collectd entry point: register config entries, then start the server.

    Does not return; the process terminates via os._exit(0).
    """
    global_config.add_config_entries([
        ("DEBUG", configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True)),
        # BUGFIX: removed a duplicate VERBOSE entry whose help_string carried the
        # typo "verbose lewel" and lacked the "-v" short option; the correct
        # definition below is kept
        ("VERBOSE", configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True)),
        ("RRD_DIR", configfile.str_c_var(
            "/var/cache/rrd",
            help_string="directory of rrd-files on local disc",
            database=True)),
        ("RRD_CACHED_DIR", configfile.str_c_var("/var/run/rrdcached", database=True)),
        ("RRD_CACHED_SOCKET", configfile.str_c_var("/var/run/rrdcached/rrdcached.sock", database=True)),
        ("RRD_STEP", configfile.int_c_var(60, help_string="RRD step value", database=True)),
        ("AGGREGATE_DIR", configfile.str_c_var("/opt/cluster/share/collectd", help_string="base dir for collectd aggregates")),
    ])
    kill_previous()
    # late load after population of global_config
    from initat.collectd.server import server_process
    server_process().loop()
    os._exit(0)
def main():
    """Register config entries, propagate the debug flag and run the code.

    Does not return; the process terminates via os._exit(0).
    """
    entry_list = [
        (
            "DEBUG",
            configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True),
        ),
        (
            "VERBOSE",
            configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True),
        ),
        (
            "INITIAL_CONFIG_RUN",
            configfile.bool_c_var(False, help_string="make a config build run on startup [%(default)s]", only_commandline=True),
        ),
        (
            "MEMCACHE_ADDRESS",
            configfile.str_c_var("127.0.0.1", help_string="memcache address"),
        ),
    ]
    global_config.add_config_entries(entry_list)
    # enable connection debugging
    settings.DEBUG = global_config["DEBUG"]
    run_code()
    # exit without running atexit handlers
    os._exit(0)
def main():
    """packageserver client entry point.

    Ensures /etc/packageserver exists (seeded with "localhost"), reads the
    server name from it and runs the client code. Exits via os._exit(0);
    ret_code 5 marks a read/write problem with /etc/packageserver.
    """
    prog_name = global_config.name()
    global_config.add_config_entries([
        ("DEBUG", configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True)),
        ("VERBOSE", configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True)),
    ])
    _options = global_config.handle_commandline(
        description="{}, version is {}".format(prog_name, VERSION_STRING),
        positional_arguments=False,
        partial=False)
    ret_code = 0
    ps_file_name = "/etc/packageserver"
    if not os.path.isfile(ps_file_name):
        try:
            # BUGFIX: was bare "except:" and py2-only file(); use open() and
            # catch Exception so KeyboardInterrupt / SystemExit pass through
            open(ps_file_name, "w").write("localhost\n")
        except Exception:
            print("error writing to {}: {}".format(ps_file_name, process_tools.get_except_info()))
            ret_code = 5
    try:
        global_config.add_config_entries([
            # first non-empty line of the file is the package server name
            ("PACKAGE_SERVER", configfile.str_c_var(open(ps_file_name, "r").read().strip().split("\n")[0].strip())),
            ("VERSION", configfile.str_c_var(VERSION_STRING)),
        ])
    except Exception:
        print("error reading from {}: {}".format(ps_file_name, process_tools.get_except_info()))
        ret_code = 5
    if not ret_code:
        global_config.add_config_entries([
            ("DEBIAN", configfile.bool_c_var(os.path.isfile("/etc/debian_version")))
        ])
    run_code()
    # exit (the return below is unreachable but kept for interface stability)
    os._exit(0)
    return 0
def __init__(self):
    """Main process of the logcheck server.

    Loads syslog-rotation settings from the database, prepares the spool
    directories, hooks into syslog, binds the network sockets and starts
    the periodic sync / rotate timers.
    """
    threading_tools.process_pool.__init__(self, "main", zmq=True)
    self.CC.init(icswServiceEnum.logcheck_server, global_config)
    self.CC.check_config()
    self.CC.read_config_from_db([
        ("SYSLOG_DIR", configfile.str_c_var("/var/log/hosts")),
        ("KEEP_LOGS_UNCOMPRESSED", configfile.int_c_var(2)),
        ("KEEP_LOGS_TOTAL", configfile.int_c_var(30)),
        # BUGFIX: dropped the "KEEP_LOGS_TOTDDAL" entry — an obvious
        # typo-duplicate of KEEP_LOGS_TOTAL with the same default (30)
        # maximum time in days to track logs
        ("LOGS_TRACKING_DAYS", configfile.int_c_var(4, info="time to track logs in days")),
        # cachesize for lineinfo (per file)
        ("LINECACHE_ENTRIES_PER_FILE", configfile.int_c_var(50, info="line cache per file")),
    ])
    # close connection (daemonizing)
    db_tools.close_connection()
    self.srv_helper = service_tools.ServiceHelper(self.log)
    self.CC.re_insert_config()
    self.register_exception("hup_error", self._hup_error)
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    # log config
    self.CC.log_config()
    # prepare directories
    self._prepare_directories()
    # enable syslog_config
    self._enable_syslog_config()
    # network bind
    self._init_network_sockets()
    Machine.setup(self)
    self.my_scanner = LogcheckScanner(self)
    # watch the syslog tree via inotify
    self.register_poller(Machine.get_watcher()._fd, zmq.POLLIN, Machine.inotify_event)
    # hourly machine sync, log rotation twice a day; both run once at startup
    self.register_timer(self.sync_machines, 3600, instant=True)
    self.register_timer(self.rotate_logs, 3600 * 12, instant=True)
def build_main(opt_ns):
    """Run an image build with the settings taken from *opt_ns*.

    Args:
        opt_ns: parsed argparse namespace with the build options.

    Returns:
        the result of the server process loop.
    """
    entry_list = [
        ("VERBOSE", configfile.bool_c_var(opt_ns.verbose)),
        ("IGNORE_ERRORS", configfile.bool_c_var(opt_ns.ignore_errors)),
        ("LOG_DESTINATION", configfile.str_c_var("uds:/var/lib/logging-server/py_log_zmq")),
        ("LOG_NAME", configfile.str_c_var("build_image")),
        # number of parallel builder processes
        ("BUILDERS", configfile.int_c_var(4)),
        ("OVERRIDE", configfile.bool_c_var(opt_ns.override)),
        ("CLEAR_LOCK", configfile.bool_c_var(opt_ns.clear_lock)),
        ("SET_LOCK", configfile.bool_c_var(opt_ns.set_lock)),
        ("SKIPCLEANUP", configfile.bool_c_var(opt_ns.skip_cleanup)),
        ("CHECK_SIZE", configfile.bool_c_var(True)),
        ("IMAGE_NAME", configfile.str_c_var(opt_ns.image)),
    ]
    global_config.add_config_entries(entry_list)
    return ServerProcess().loop()
def main():
    """Register config entries and run the server process loop.

    Does not return; the process terminates via os._exit(0).
    """
    entry_list = [
        (
            "DEBUG",
            configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True),
        ),
        (
            "VERBOSE",
            configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True),
        ),
        (
            "FORCE_SCAN",
            configfile.bool_c_var(
                False,
                help_string="force initial scan of accounting file [%(default)s]",
                action="store_true",
                only_commandline=True,
            ),
        ),
    ]
    global_config.add_config_entries(entry_list)
    ServerProcess().loop()
    # exit without running atexit handlers
    os._exit(0)
def main():
    """mother entry point: register config entries and start the server.

    Does not return; exits via sys.exit(0).
    """
    entry_list = [
        (
            "DEBUG",
            configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True),
        ),
        (
            "DATABASE_DEBUG",
            configfile.bool_c_var(False, help_string="enable database debug mode [%(default)s]", only_commandline=True),
        ),
        (
            "VERBOSE",
            configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True),
        ),
    ]
    global_config.add_config_entries(entry_list)
    # mirror the database debug flag into the django settings
    settings.DEBUG = global_config["DATABASE_DEBUG"]
    settings.DATABASE_DEBUG = global_config["DATABASE_DEBUG"]
    initat.mother.server.ServerProcess().loop()
    sys.exit(0)
def __init__(self, options):
    """Main process of the cluster server.

    Runs in two modes: normal server mode, or one-shot direct-command mode
    when global_config["COMMAND"] is set (``__run_command``). In command
    mode no network sockets are opened; the command is executed via a timer.
    """
    threading_tools.process_pool.__init__(self, "main", zmq=True)
    long_host_name, mach_name = process_tools.get_fqdn()
    # one-shot command mode iff a non-empty COMMAND was given
    self.__run_command = True if global_config["COMMAND"].strip() else False
    # rewrite LOG_NAME if necessary
    if self.__run_command:
        self.CC.init(
            icswServiceEnum.cluster_server,
            global_config,
            init_msi_block=False,
            log_name_postfix="direct-{}".format(global_config["COMMAND"]),
        )
    else:
        self.CC.init(
            icswServiceEnum.cluster_server,
            global_config,
        )
    self.CC.check_config()
    # close DB connection (daemonize)
    if self.__run_command:
        global_config.mc_prefix = global_config["COMMAND"]
    else:
        # create hardware fingerprint
        self.CC.create_hfp()
        # enable memcache backend
        global_config.enable_mc()
    db_tools.close_connection()
    self.CC.read_config_from_db(
        [
            ("IMAGE_SOURCE_DIR", configfile.str_c_var("/opt/cluster/system/images")),
            ("MAILSERVER", configfile.str_c_var("localhost")),
            ("FROM_NAME", configfile.str_c_var("quotawarning")),
            ("FROM_ADDR", configfile.str_c_var(long_host_name)),
            ("VERSION", configfile.str_c_var(VERSION_STRING, database=False)),
            ("QUOTA_ADMINS", configfile.str_c_var("*****@*****.**")),
            ("MONITOR_QUOTA_USAGE", configfile.bool_c_var(False, info="enabled quota usage tracking")),
            ("TRACK_ALL_QUOTAS", configfile.bool_c_var(False, info="also track quotas without limit")),
            ("QUOTA_CHECK_TIME_SECS", configfile.int_c_var(3600)),
            ("USER_MAIL_SEND_TIME", configfile.int_c_var(3600, info="time in seconds between two mails")),
            ("SERVER_FULL_NAME", configfile.str_c_var(long_host_name, database=False)),
            ("SERVER_SHORT_NAME", configfile.str_c_var(mach_name, database=False)),
            ("DATABASE_DUMP_DIR", configfile.str_c_var("/opt/cluster/share/db_backup")),
            ("DATABASE_KEEP_DAYS", configfile.int_c_var(30)),
            ("USER_SCAN_TIMER", configfile.int_c_var(7200, info="time in seconds between two user_scan runs")),
            ("NEED_ALL_NETWORK_BINDS", configfile.bool_c_var(True, info="raise an error if not all bind() calls are successfull")),
        ]
    )
    # config is only written back to the DB in full server mode
    if not self.__run_command:
        self.CC.re_insert_config()
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_func("bg_finished", self._bg_finished)
    self._log_config()
    self._check_uuid()
    self._load_modules()
    self.__options = options
    self._set_next_backup_time(True)
    if self.__run_command:
        # one-shot: fire the command once, instantly
        self.register_timer(self._run_command, 3600, instant=True)
    else:
        self._init_network_sockets()
        if not self["exit_requested"]:
            self.init_notify_framework(global_config)
            self.add_process(CapabilityProcess("capability_process"), start=True)
            self.add_process(LicenseChecker("license_checker"), start=True)
            db_tools.close_connection()
            # fast update cadence in debug mode (2s vs 30s)
            self.register_timer(
                self._update,
                2 if global_config["DEBUG"] else 30,
                instant=True
            )
def main(opt_args=None):
    """Command-line entry point of the cluster server.

    Args:
        opt_args: optional iterable of pre-parsed arguments forwarded to
            handle_commandline; None means "use the real command line".

    Returns:
        int: always 0 (run_code handles its own error reporting).
    """
    prog_name = global_config.name()
    entry_list = [
        (
            "DEBUG",
            configfile.bool_c_var(False, help_string="enable debug mode [%(default)s]", short_options="d", only_commandline=True),
        ),
        (
            "DATABASE_DEBUG",
            configfile.bool_c_var(False, help_string="enable database debug mode [%(default)s]", only_commandline=True),
        ),
        (
            "VERBOSE",
            configfile.int_c_var(0, help_string="set verbose level [%(default)d]", short_options="v", only_commandline=True),
        ),
        (
            "CONTACT",
            configfile.bool_c_var(False, only_commandline=True, help_string="directly connect cluster-server on localhost [%(default)s]"),
        ),
        (
            "COMMAND",
            configfile.str_c_var("", short_options="c", only_commandline=True, help_string="command to execute"),
        ),
        (
            "BACKUP_DATABASE",
            configfile.bool_c_var(False, only_commandline=True, help_string="start backup of database immediately [%(default)s], only works in DEBUG mode"),
        ),
        (
            "OPTION_KEYS",
            configfile.array_c_var([], short_options="D", action="append", only_commandline=True, nargs="*", help_string="optional key:value pairs (command dependent)"),
        ),
        (
            "SHOW_RESULT",
            configfile.bool_c_var(False, only_commandline=True, help_string="show full XML result [%(default)s]"),
        ),
    ]
    global_config.add_config_entries(entry_list)
    options = global_config.handle_commandline(
        *(opt_args or []),
        description="{}, version is {}".format(prog_name, VERSION_STRING),
        positional_arguments=False
    )
    # mirror the database debug flag into the django settings
    settings.DEBUG = global_config["DATABASE_DEBUG"]
    settings.DATABASE_DEBUG = global_config["DATABASE_DEBUG"]
    run_code(options)
    return 0
def check_config(self):
    """Validate the service configuration for this srv_type_enum.

    Resolves the instance definition from the instance XML, collects
    PID/port config entries, verifies (for real server services) that
    this host is configured for the service in the database, and finally
    publishes everything into global_config. Exits the process with
    status 5 when the host is not a valid server for the service.
    """
    # late import (for clients without django)
    if self.srv_type_enum.value.server_service:
        from initat.tools import config_tools
        from django.db.models import Q
        from initat.cluster.backbone.models import LogSource
    if self.srv_type_enum.value.instance_name is None:
        raise KeyError("No instance_name set for srv_type_enum '{}'".format(self.srv_type_enum.name))
    self._instance = self._inst_xml[self.srv_type_enum.value.instance_name]
    # conf_names = self._inst_xml.get_config_names(self._instance)
    self.log(
        "check for service_type {} (==enum {})".format(
            self.srv_type_enum.value.name,
            self.srv_type_enum.name,
        )
    )
    # base entries: pid file plus one <NAME>_PORT entry per defined port
    _opts = [
        (
            "PID_NAME",
            configfile.str_c_var(self._inst_xml.get_pid_file_name(self._instance), source="instance", database=False)
        ),
    ]
    for _name, _value in self._inst_xml.get_port_dict(self._instance).iteritems():
        _opts.append(
            (
                "{}_PORT".format(_name.upper()),
                configfile.int_c_var(_value, source="instance", database=False)
            ),
        )
    if self.srv_type_enum.value.server_service:
        self.__sql_info = config_tools.server_check(service_type_enum=self.srv_type_enum)
        if self.__sql_info is None or not self.__sql_info.effective_device:
            # this can normally not happen due to start / stop via meta-server
            self.log("Not a valid {}".format(self.srv_type_enum.name), logging_tools.LOG_LEVEL_ERROR)
            sys.exit(5)
        else:
            # check eggConsumers
            # set values
            _opts.extend(
                [
                    (
                        "SERVICE_ENUM_NAME",
                        configfile.str_c_var(self.srv_type_enum.name),
                    ),
                    (
                        "SERVER_SHORT_NAME",
                        configfile.str_c_var(process_tools.get_machine_name(True)),
                    ),
                    (
                        "SERVER_IDX",
                        configfile.int_c_var(self.__sql_info.device.pk, database=False, source="instance")
                    ),
                    (
                        "CONFIG_IDX",
                        configfile.int_c_var(self.__sql_info.config.pk, database=False, source="instance")
                    ),
                    (
                        "EFFECTIVE_DEVICE_IDX",
                        configfile.int_c_var(self.__sql_info.effective_device.pk, database=False, source="instance")
                    ),
                    (
                        # creates a LogSource entry bound to the effective device
                        "LOG_SOURCE_IDX",
                        configfile.int_c_var(
                            LogSource.new(self.srv_type_enum.name, device=self.__sql_info.effective_device).pk,
                            source="instance",
                        )
                    ),
                    (
                        "MEMCACHE_PORT",
                        configfile.int_c_var(self._inst_xml.get_port_dict("memcached", command=True), source="instance")
                    ),
                ]
            )
    self.global_config.add_config_entries(_opts)
    if self.__init_msi_block:
        # write the pid file and create the meta-server-info block
        self.__pid_name = self.global_config["PID_NAME"]
        process_tools.save_pid(self.__pid_name)
        self.log("init MSI Block")
        self.__msi_block = process_tools.MSIBlock(self.srv_type_enum.value.msi_block_name)
        self.__msi_block.add_actual_pid(process_name="main")
        self.__msi_block.save()
def __init__(self):
    """Main process of the RMS (SGE) server.

    Detects the local SGE installation (root / cell / arch), loads the
    RMS settings from the database, applies the license policy and starts
    the monitoring, accounting and license sub-processes.
    """
    threading_tools.process_pool.__init__(
        self,
        "main",
        zmq=True,
    )
    self.CC.init(icswServiceEnum.rms_server, global_config)
    self.CC.check_config()
    db_tools.close_connection()
    sge_dict = {}
    _all_ok = True
    # read SGE_ROOT / SGE_CELL from their /etc marker files
    # NOTE(review): v_default is never used — missing files yield "" instead
    for v_name, v_src, v_default in [
        ("SGE_ROOT", "/etc/sge_root", "/opt/sge"),
        ("SGE_CELL", "/etc/sge_cell", "default")
    ]:
        if os.path.isfile(v_src):
            sge_dict[v_name] = file(v_src, "r").read().strip()
        else:
            _all_ok = False
            sge_dict[v_name] = ""
    if _all_ok:
        # determine the SGE architecture via the arch helper script
        stat, sge_dict["SGE_ARCH"], _log_lines = call_command(
            "/{}/util/arch".format(sge_dict["SGE_ROOT"]))
        if stat:
            # non-zero exit status: no usable arch
            sge_dict["SGE_ARCH"] = ""
    else:
        sge_dict["SGE_ARCH"] = ""
    self.CC.read_config_from_db([
        ("CHECK_ITERATIONS", configfile.int_c_var(3)),
        ("RETRY_AFTER_CONNECTION_PROBLEMS", configfile.int_c_var(0)),
        ("FROM_ADDR", configfile.str_c_var("rms_server")),
        ("TO_ADDR", configfile.str_c_var("*****@*****.**")),
        ("SGE_ARCH", configfile.str_c_var(sge_dict["SGE_ARCH"])),
        ("SGE_ROOT", configfile.str_c_var(sge_dict["SGE_ROOT"])),
        ("SGE_CELL", configfile.str_c_var(sge_dict["SGE_CELL"])),
        ("FAIRSHARE_TREE_NODE_TEMPLATE", configfile.str_c_var("/{project}/{user}")),
        ("FAIRSHARE_TREE_DEFAULT_SHARES", configfile.int_c_var(1000)),
        ("TRACE_FAIRSHARE", configfile.bool_c_var(False)),
        ("CLEAR_ITERATIONS", configfile.int_c_var(1)),
        ("CHECK_ACCOUNTING_TIMEOUT", configfile.int_c_var(300)),
        ("LICENSE_BASE", configfile.str_c_var("/etc/sysconfig/licenses")),
        ("TRACK_LICENSES", configfile.bool_c_var(False)),
        ("TRACK_LICENSES_IN_DB", configfile.bool_c_var(False)),
        ("MODIFY_SGE_GLOBAL", configfile.bool_c_var(False)),
    ],
    )
    # check modify_sge_global flag and set filesystem flag accordingly
    sge_license_tools.handle_license_policy(
        global_config["LICENSE_BASE"],
        global_config["MODIFY_SGE_GLOBAL"])
    # re-insert config
    self.CC.re_insert_config()
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    self.register_func("job_ended", self._job_ended)
    self._log_config()
    # dc.release()
    self._init_network_sockets()
    # worker processes: queue monitoring, accounting parsing, license tracking
    self.add_process(RMSMonProcess("rms_mon"), start=True)
    self.add_process(AccountingProcess("accounting"), start=True)
    self.add_process(LicenseProcess("license"), start=True)
def __init__(self):
    """Main process of the mother (boot / node-control) server.

    Loads the TFTP / DHCP related settings, prepares the boot directory
    tree, rewrites DHCP / syslog / NFS configuration as required and
    starts the kernel-sync, command, node-control, ICMP and SNMP
    sub-processes.
    """
    _long_host_name, mach_name = process_tools.get_fqdn()
    threading_tools.process_pool.__init__(self, "main", zmq=True)
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.CC.init(icswServiceEnum.mother_server, global_config)
    self.CC.check_config()
    # close db connection (for daemonizing)
    db_tools.close_connection()
    self.debug = global_config["DEBUG"]
    self.srv_helper = service_tools.ServiceHelper(self.log)
    # command port of the hoststatus instance on the nodes
    self.__hs_port = InstanceXML(quiet=True).get_port_dict("hoststatus", command=True)
    # log config
    self.CC.read_config_from_db([
        ("TFTP_LINK", configfile.str_c_var("/tftpboot")),
        ("TFTP_DIR", configfile.str_c_var(os.path.join(CLUSTER_DIR, "system", "tftpboot"))),
        ("CLUSTER_DIR", configfile.str_c_var(CLUSTER_DIR)),
        # in 10th of seconds
        ("NODE_BOOT_DELAY", configfile.int_c_var(50)),
        ("FANCY_PXE_INFO", configfile.bool_c_var(False)),
        ("SERVER_SHORT_NAME", configfile.str_c_var(mach_name)),
        ("WRITE_DHCP_CONFIG", configfile.bool_c_var(True)),
        ("DHCP_AUTHORITATIVE", configfile.bool_c_var(False)),
        ("DHCP_ONLY_BOOT_NETWORKS", configfile.bool_c_var(True)),
        ("MODIFY_NFS_CONFIG", configfile.bool_c_var(True)),
        ("NEED_ALL_NETWORK_BINDS", configfile.bool_c_var(True)),
    ])
    # derived directories below TFTP_DIR / CLUSTER_DIR
    global_config.add_config_entries([
        ("CONFIG_DIR", configfile.str_c_var(os.path.join(global_config["TFTP_DIR"], "config"))),
        ("ETHERBOOT_DIR", configfile.str_c_var(os.path.join(global_config["TFTP_DIR"], "etherboot"))),
        ("KERNEL_DIR", configfile.str_c_var(os.path.join(global_config["TFTP_DIR"], "kernels"))),
        ("SHARE_DIR", configfile.str_c_var(os.path.join(global_config["CLUSTER_DIR"], "share", "mother"))),
        # LogSource entry used for node-related log records
        ("NODE_SOURCE_IDX", configfile.int_c_var(LogSource.new("node").pk)),
    ])
    self.CC.log_config()
    self.CC.re_insert_config()
    # prepare directories
    self._prepare_directories()
    # check netboot functionality
    self._check_netboot_functionality()
    # check nfs exports
    self._check_nfs_exports()
    # modify syslog config
    self._enable_syslog_config()
    # dhcp config
    self.write_dhcp_config()
    # check status entries
    self._check_status_entries()
    self.register_func("contact_hoststatus", self._contact_hoststatus)
    my_uuid = uuid_tools.get_uuid()
    self.log("cluster_device_uuid is '{}'".format(my_uuid.get_urn()))
    # only start the sub-processes when all sockets could be bound
    if self._init_network_sockets():
        self.add_process(initat.mother.kernel.KernelSyncProcess("kernel"), start=True)
        self.add_process(initat.mother.command.ExternalCommandProcess("command"), start=True)
        self.add_process(initat.mother.control.NodeControlProcess("control"), start=True)
        self.add_process(initat.mother.control.ICMPProcess("icmp"), start=True)
        db_tools.close_connection()
        # hand the logging / verbosity settings down to the SNMP process
        conf_dict = {
            key: global_config[key] for key in ["LOG_NAME", "LOG_DESTINATION", "VERBOSE"]
        }
        self.add_process(SNMPProcess("snmp_process", conf_dict=conf_dict), start=True)
        # send initial commands
        self.send_to_process(
            "kernel",
            "srv_command",
            unicode(server_command.srv_command(command="check_kernel_dir", insert_all_found="1")))
        # restart hoststatus
        self.send_to_process("command", "delay_command", "/etc/init.d/hoststatus restart", delay_time=5)
        self.send_to_process("control", "refresh", refresh=False)
    else:
        self._int_error("bind problem")
def __init__(self):
    """Main process of the monitoring (md-config) server.

    Loads the monitoring-daemon settings from the database, initialises
    the build control, checks the installed monitoring daemon and starts
    the syncer / dynconfig / log-reader / KPI sub-processes. Aborts with
    an int_error when no monitoring daemon (MD) is found.
    """
    long_host_name, mach_name = process_tools.get_fqdn()
    threading_tools.process_pool.__init__(self, "main", zmq=True)
    self.CC.init(icswServiceEnum.monitor_server, global_config)
    self.CC.check_config()
    db_tools.close_connection()
    self.CC.read_config_from_db([
        ("NETSPEED_WARN_MULT", configfile.float_c_var(0.85)),
        ("NETSPEED_CRITICAL_MULT", configfile.float_c_var(0.95)),
        ("NETSPEED_DEFAULT_VALUE", configfile.int_c_var(10000000)),
        ("CHECK_HOST_ALIVE_PINGS", configfile.int_c_var(5)),
        ("CHECK_HOST_ALIVE_TIMEOUT", configfile.float_c_var(5.0)),
        ("ENABLE_COLLECTD", configfile.bool_c_var(False)),
        ("ENABLE_NAGVIS", configfile.bool_c_var(False)),
        ("ENABLE_FLAP_DETECTION", configfile.bool_c_var(False)),
        ("NAGVIS_DIR", configfile.str_c_var("/opt/nagvis4icinga")),
        ("NAGVIS_URL", configfile.str_c_var("/nagvis")),
        ("NONE_CONTACT_GROUP", configfile.str_c_var("none_group")),
        ("FROM_ADDR", configfile.str_c_var(long_host_name)),
        ("LOG_EXTERNAL_COMMANDS", configfile.bool_c_var(False)),
        ("LOG_PASSIVE_CHECKS", configfile.bool_c_var(False)),
        ("BUILD_CONFIG_ON_STARTUP", configfile.bool_c_var(True)),
        ("RELOAD_ON_STARTUP", configfile.bool_c_var(True)),
        ("RETAIN_HOST_STATUS", configfile.bool_c_var(True)),
        ("RETAIN_SERVICE_STATUS", configfile.bool_c_var(True)),
        ("PASSIVE_HOST_CHECKS_ARE_SOFT", configfile.bool_c_var(True)),
        ("RETAIN_PROGRAM_STATE", configfile.bool_c_var(False)),
        ("USE_HOST_DEPENDENCIES", configfile.bool_c_var(False)),
        ("USE_SERVICE_DEPENDENCIES", configfile.bool_c_var(False)),
        ("TRANSLATE_PASSIVE_HOST_CHECKS", configfile.bool_c_var(True)),
        ("USE_ONLY_ALIAS_FOR_ALIAS", configfile.bool_c_var(False)),
        ("HOST_DEPENDENCIES_FROM_TOPOLOGY", configfile.bool_c_var(False)),
        ("CCOLLCLIENT_TIMEOUT", configfile.int_c_var(10)),
        ("CSNMPCLIENT_TIMEOUT", configfile.int_c_var(20)),
        ("MAX_SERVICE_CHECK_SPREAD", configfile.int_c_var(5)),
        ("MAX_HOST_CHECK_SPREAD", configfile.int_c_var(5)),
        ("MAX_CONCURRENT_CHECKS", configfile.int_c_var(500)),
        ("CHECK_SERVICE_FRESHNESS", configfile.bool_c_var(True, help_string="enable service freshness checking")),
        ("CHECK_HOST_FRESHNESS", configfile.bool_c_var(True, help_string="enable host freshness checking")),
        ("SAFE_CC_NAME", configfile.bool_c_var(False)),
        ("SERVICE_FRESHNESS_CHECK_INTERVAL", configfile.int_c_var(60)),
        ("HOST_FRESHNESS_CHECK_INTERVAL", configfile.int_c_var(60)),
        ("SAFE_NAMES", configfile.bool_c_var(
            False,
            help_string="convert all command descriptions to safe names (without spaces), [%(default)s]"
        )),
        ("ENABLE_ICINGA_LOG_PARSING", configfile.bool_c_var(
            True,
            help_string="collect icinga logs in the database (required for status history and kpis)"
        )),
    ])
    # copy flags
    self.__verbose = global_config["VERBOSE"]
    # log config
    self.CC.log_config()
    # re-insert config
    self.CC.re_insert_config()
    # init build control
    self.BC = BuildControl(self)
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    self._check_notification()
    self._check_special_commands()
    # sync master uuid
    self.__sync_master_uuid = None
    # from mixins
    self.VCM_check_md_version()
    self._init_network_sockets()
    # MD_TYPE is set by the version check iff a monitoring daemon was found
    if "MD_TYPE" in global_config:
        self.register_func("register_remote", self._register_remote)
        self.register_func("send_command", self._send_command)
        self.register_func("ocsp_results", self._ocsp_results)
        self.register_func("set_sync_master_uuid", self._set_sync_master_uuid)
        self.register_func("distribution_info", self._distribution_info)
        self.register_func("build_step", self.BC.build_step)
        self.add_process(SyncerProcess("syncer"), start=True)
        self.add_process(DynConfigProcess("dynconfig"), start=True)
        self.add_process(IcingaLogReader("IcingaLogReader"), start=True)
        self.add_process(KpiProcess("KpiProcess"), start=True)
        # wait for the processes to start
        time.sleep(0.5)
        self.register_timer(self._check_for_redistribute, 60 if global_config["DEBUG"] else 300)
        # only test code
        # self.send_to_remote_server(
        #     "cluster-server",
        #     unicode(server_command.srv_command(command="statusd")),
        # )
    else:
        self._int_error("no MD found")
def read_config_from_db(g_config, sql_info, init_list=[]):
    # NOTE(review): mutable default argument; harmless here since init_list is
    # only read, never mutated — but consider init_list=None with a fallback.
    """Merge database config variables into a config object.

    Args:
        g_config: the configfile configuration object to update.
        sql_info: a server_check-like object (config / effective_device /
            short_host_name are read from it).
        init_list: optional list of (name, c_var) defaults; when given, only
            DB variables with those names are considered.
    """
    g_config.add_config_entries(init_list, database=True)
    if sql_info.effective_device:
        # dict of local vars without specified host
        l_var_wo_host = {}
        for short in [
            "str",
            "int",
            "blob",
            "bool",
        ]:
            # very similiar code appears in config_tools.py
            src_sql_obj = _VAR_LUT[short].objects
            if init_list:
                # restrict the query to the names given in init_list
                src_sql_obj = src_sql_obj.filter(
                    Q(name__in=[var_name for var_name, _var_value in init_list])
                )
            for db_rec in src_sql_obj.filter(
                Q(config=sql_info.config) &
                Q(config__device_config__device=sql_info.effective_device)
            ).order_by("name"):
                # "host:name" records are host-local, plain names are global
                if db_rec.name.count(":"):
                    var_global = False
                    local_host_name, var_name = db_rec.name.split(":", 1)
                else:
                    var_global = True
                    local_host_name, var_name = (sql_info.short_host_name, db_rec.name)
                source = "{}_table::{}".format(short, db_rec.pk)
                # build the typed c_var for the DB value
                if isinstance(db_rec.value, array.array):
                    new_val = configfile.str_c_var(db_rec.value.tostring(), source=source)
                elif short == "int":
                    new_val = configfile.int_c_var(int(db_rec.value), source=source)
                elif short == "bool":
                    new_val = configfile.bool_c_var(bool(db_rec.value), source=source)
                else:
                    new_val = configfile.str_c_var(db_rec.value, source=source)
                new_val.is_global = var_global
                present_in_config = var_name in g_config
                if present_in_config:
                    # copy settings from config
                    new_val.database = g_config.database(var_name)
                    new_val.is_global = var_global
                    new_val._help_string = g_config.help_string(var_name)
                if local_host_name == sql_info.short_host_name:
                    # NOTE(review): mixes var_name and var_name.upper() lookups —
                    # presumably config keys are stored uppercased; verify
                    if var_name.upper() in g_config and g_config.fixed(var_name.upper()):
                        # present value is fixed, keep value, only copy global / local status
                        g_config.set_global(var_name.upper(), new_val.is_global)
                    else:
                        g_config.add_config_entries([(var_name.upper(), new_val)])
                elif local_host_name == "":
                    # remember host-less vars as fallback candidates
                    l_var_wo_host[var_name.upper()] = new_val
        # check for vars to insert
        for wo_var_name, wo_var in l_var_wo_host.iteritems():
            # host-less vars only fill gaps (missing or still at default)
            if wo_var_name not in g_config or g_config.get_source(wo_var_name) == "default":
                g_config.add_config_entries([(wo_var_name, wo_var)])