def configure_sspl_syslog(self):
    """Configure log file path in rsyslog and update logrotate config file."""
    system_files_root = "%s/low-level/files" % consts.SSPL_BASE_DIR
    sspl_log_file_path = Utility.get_config_value(
        consts.SSPL_CONFIG_INDEX, "SYSTEM_INFORMATION>sspl_log_file_path")
    sspl_sb_log_file_path = sspl_log_file_path.replace(
        "/sspl.log", "/sspl_support_bundle.log")
    iem_log_file_path = Utility.get_config_value(
        consts.SSPL_CONFIG_INDEX, "IEMSENSOR>log_file_path")
    manifest_log_file_path = sspl_log_file_path.replace(
        "/sspl.log", "/manifest.log")

    # IEM configuration
    os.makedirs("%s/iem/iec_mapping" % consts.PRODUCT_BASE_DIR,
                exist_ok=True)
    distutils.dir_util.copy_tree(
        "%s/iec_mapping/" % system_files_root,
        "%s/iem/iec_mapping" % consts.PRODUCT_BASE_DIR)
    if not os.path.exists(consts.RSYSLOG_IEM_CONF):
        shutil.copyfile(
            "%s/%s" % (system_files_root, consts.RSYSLOG_IEM_CONF),
            consts.RSYSLOG_IEM_CONF)
    # Update log location as per sspl.conf
    Utility.replace_expr(consts.RSYSLOG_IEM_CONF, 'File.*[=,"]',
                         'File="%s"' % iem_log_file_path)

    # SSPL rsyslog configuration
    if not os.path.exists(consts.RSYSLOG_SSPL_CONF):
        shutil.copyfile(
            "%s/%s" % (system_files_root, consts.RSYSLOG_SSPL_CONF),
            consts.RSYSLOG_SSPL_CONF)
    # Update log location as per sspl.conf
    Utility.replace_expr(consts.RSYSLOG_SSPL_CONF, 'File.*[=,"]',
                         'File="%s"' % sspl_log_file_path)

    # Manifest bundle log configuration
    if not os.path.exists(consts.RSYSLOG_MSB_CONF):
        shutil.copyfile(
            "%s/%s" % (system_files_root, consts.RSYSLOG_MSB_CONF),
            consts.RSYSLOG_MSB_CONF)
    # Update log location as per sspl.conf
    Utility.replace_expr(consts.RSYSLOG_MSB_CONF, 'File.*[=,"]',
                         'File="%s"' % manifest_log_file_path)

    # Support bundle log configuration
    if not os.path.exists(consts.RSYSLOG_SB_CONF):
        shutil.copyfile(
            "%s/%s" % (system_files_root, consts.RSYSLOG_SB_CONF),
            consts.RSYSLOG_SB_CONF)
    # Update log location as per sspl.conf
    Utility.replace_expr(consts.RSYSLOG_SB_CONF, 'File.*[=,"]',
                         'File="%s"' % sspl_sb_log_file_path)

    # Configure logrotate
    # Create logrotate dir in case it's not present
    os.makedirs(consts.LOGROTATE_DIR, exist_ok=True)
    Utility.replace_expr(
        "%s/etc/logrotate.d/iem_messages" % system_files_root,
        0, iem_log_file_path)
    Utility.replace_expr(
        "%s/etc/logrotate.d/sspl_logs" % system_files_root,
        0, sspl_log_file_path)
    Utility.replace_expr(
        "%s/etc/logrotate.d/sspl_sb_logs" % system_files_root,
        0, sspl_sb_log_file_path)
    shutil.copy2("%s/etc/logrotate.d/iem_messages" % system_files_root,
                 consts.IEM_LOGROTATE_CONF)
    shutil.copy2("%s/etc/logrotate.d/sspl_logs" % system_files_root,
                 consts.SSPL_LOGROTATE_CONF)
    shutil.copy2("%s/etc/logrotate.d/manifest_logs" % system_files_root,
                 consts.MSB_LOGROTATE_CONF)
    shutil.copy2("%s/etc/logrotate.d/sspl_sb_logs" % system_files_root,
                 consts.SB_LOGROTATE_CONF)
    # This rsyslog restart happens after the rsyslog conf files are updated
    # and before SSPL starts. If it is removed from here, SSPL's initial logs
    # may not appear in "/var/log/<product>/sspl/sspl.log" and would have to
    # be collected from "/var/log/messages" instead.
    service = DbusServiceHandler()
    service.restart('rsyslog.service')
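# --- Illustrative sketch (not from the original source) ---
# Utility.replace_expr's implementation is not shown here; assuming it does a
# per-line regex substitution, the 'File.*[=,"]' pattern used above would
# rewrite the "File=..." directive of an rsyslog conf roughly like this
# (paths are illustrative):
#
#   import re
#
#   line = 'File="/var/log/cortx/iem/iem_messages"'
#   new_line = re.sub(r'File.*[=,"]',
#                     'File="/var/log/cortx/sspl/iem_messages"', line)
#   # new_line == 'File="/var/log/cortx/sspl/iem_messages"'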
class SystemdService(Debug):
    """Handles service request messages to systemd."""

    ACTUATOR_NAME = "SystemdService"

    @staticmethod
    def name():
        """@return: name of the module."""
        return SystemdService.ACTUATOR_NAME

    def __init__(self):
        super(SystemdService, self).__init__()

        # Use d-bus to communicate with systemd
        # Described at: http://www.freedesktop.org/wiki/Software/systemd/dbus/

        # Obtain an instance of d-bus to communicate with systemd
        self._bus = SystemBus()

        # Obtain a manager interface to d-bus for communications with systemd
        systemd = self._bus.get_object('org.freedesktop.systemd1',
                                       '/org/freedesktop/systemd1')
        self._manager = Interface(
            systemd, dbus_interface='org.freedesktop.systemd1.Manager')

        # Subscribe to signal changes
        self._manager.Subscribe()

        # Create service handler object.
        self._service = DbusServiceHandler()

    def perform_request(self, jsonMsg):
        """Performs the service request."""
        self._check_debug(jsonMsg)

        # Parse out the service name and the request to perform on it
        if jsonMsg.get("actuator_request_type").get("service_controller") \
                is not None:
            self._service_name = jsonMsg.get("actuator_request_type").\
                get("service_controller").get("service_name")
            self._service_request = jsonMsg.get("actuator_request_type").\
                get("service_controller").get("service_request")
        else:
            self._service_name = jsonMsg.get("actuator_request_type").\
                get("service_watchdog_controller").get("service_name")
            self._service_request = jsonMsg.get("actuator_request_type").\
                get("service_watchdog_controller").get("service_request")
        logger.debug("perform_request, service_name: %s, service_request: %s" %
                     (self._service_name, self._service_request))

        try:
            # Load the systemd unit for the service
            systemd_unit = self._manager.LoadUnit(self._service_name)

            # Get a proxy to systemd for accessing properties of units
            self._proxy = self._bus.get_object("org.freedesktop.systemd1",
                                               str(systemd_unit))

            # The returned result of the desired action
            result = {}
            is_err_response = False
            if self._service_request in ['restart', 'start']:
                # Before restarting/starting the service, check its state and
                # process the restart/start request only if the service is not
                # already active or activating.
                service_state = self._service.get_state(self._service_name)
                state = service_state.state
                if state not in ['active', 'activating']:
                    if self._service_request == "restart":
                        self._service.restart(self._service_name)
                    elif self._service_request == "start":
                        self._service.start(self._service_name)
                    # Ensure we get an "active" state and not "activating"
                    service_state = self._service.get_state(self._service_name)
                    state = service_state.state
                    max_wait = 0
                    while state != "active":
                        logger.debug(
                            "%s status is activating, needs 'active' "
                            "state after %s request has been processed, "
                            "retrying" % (
                                self._service_name, self._service_request))
                        time.sleep(1)
                        max_wait += 1
                        if max_wait > 20:
                            logger.debug("maximum wait - %s seconds, for "
                                         "service restart reached." % max_wait)
                            break
                        service_state = self._service.get_state(
                            self._service_name)
                        state = service_state.state
                else:
                    is_err_response = True
                    err_msg = (
                        "Can not process %s request, for %s, as service "
                        "is already in %s state." % (
                            self._service_request, self._service_name, state))
                    logger.error(err_msg)
                    return (self._service_name, err_msg, is_err_response)

            elif self._service_request == "stop":
                self._service.stop(self._service_name)

            elif self._service_request == "status":
                # Return the status below
                service_status = self._service.get_state(self._service_name)

            # TODO: Use cortx.utils Service class methods for
            # enable/disable services.
            elif self._service_request == "enable":
                service_list = []
                service_list.append(self._service_name)

                # EnableUnitFiles() takes its second argument as a boolean:
                # 'True' enables a service for runtime only (creates a symlink
                # under /run/...), 'False' enables it persistently (creates a
                # symlink under /etc/...).
                _, dbus_result = self._manager.EnableUnitFiles(
                    service_list, False, True)
                res = parse_enable_disable_dbus_result(dbus_result)
                result.update(res)
                logger.debug("perform_request, result for enable request: %s"
                             % (result))

            elif self._service_request == "disable":
                service_list = []
                service_list.append(self._service_name)

                # DisableUnitFiles() takes its second argument as a boolean:
                # 'True' disables a service for runtime only (removes the
                # symlink from /run/...), 'False' disables it persistently
                # (removes the symlink from /etc/...).
                dbus_result = self._manager.DisableUnitFiles(
                    service_list, False)
                res = parse_enable_disable_dbus_result(dbus_result)
                result.update(res)
                logger.debug(
                    "perform_request, result for disable request: %s" % result)
            else:
                logger.error("perform_request, Unknown service request - %s "
                             "for service - %s" % (
                                 self._service_request, self._service_name))
                is_err_response = True
                return (self._service_name, "Unknown service request",
                        is_err_response)

        except debus_exceptions.DBusException as error:
            is_err_response = True
            logger.exception("DBus Exception: %r" % error)
            return (self._service_name, str(error), is_err_response)
        except Exception as ae:
            logger.exception("SystemD Exception: %r" % ae)
            is_err_response = True
            return (self._service_name, str(ae), is_err_response)

        # Give the unit some time to finish starting/stopping before fetching
        # its final status
        time.sleep(5)

        # Get the current status of the process and return it back:
        service_status = self._service.get_state(self._service_name)
        pid = service_status.pid
        state = service_status.state
        substate = service_status.substate
        status = self._service.is_enabled(self._service_name)
        uptime = get_service_uptime(self._service_name)
        # Parse dbus output to fetch the command line path with args.
        command_line = service_status.command_line_path
        command_line_path_with_args = []
        for field in list(command_line[0][1]):
            command_line_path_with_args.append(str(field))
        result["pid"] = pid
        result["state"] = state
        result["substate"] = substate
        result["status"] = status
        result["uptime"] = uptime
        result["command_line_path"] = command_line_path_with_args

        logger.debug("perform_request, state: %s, substate: %s" % (
            str(state), str(substate)))
        return (self._service_name, result, is_err_response)
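# --- Illustrative usage sketch (not part of the original module) ---
# Based on the keys read in perform_request() above, a minimal actuator
# request handled by this class would look roughly like the dict below. The
# exact message envelope used by the SSPL messaging framework is not shown
# here, so treat this only as a shape example.
#
#   request = {
#       "actuator_request_type": {
#           "service_controller": {
#               "service_name": "rsyslog.service",
#               "service_request": "status"
#           }
#       }
#   }
#   service_name, result, is_err = SystemdService().perform_request(request)
#   # On success, `result` carries pid/state/substate/status/uptime and the
#   # command line path of the unit, as assembled at the end of the method.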
class SSPLTestCmd:
    """Starts tests based on plan (sanity|alerts|self_primary|self_secondary)."""

    def __init__(self, args: list):
        self.args = args
        self.name = "sspl_test"
        self.plan = "self_primary"
        self.avoid_rmq = False
        self.dbus_service = DbusServiceHandler()
        # Load global, sspl and test configs
        Conf.load(SSPL_CONFIG_INDEX, sspl_config_path)
        global_config_url = Conf.get(
            SSPL_CONFIG_INDEX, "SYSTEM_INFORMATION>global_config_copy_url")
        Conf.load(GLOBAL_CONFIG_INDEX, global_config_url)
        Conf.load(SSPL_TEST_CONFIG_INDEX, sspl_test_config_path)

    @staticmethod
    def validate():
        """Check that the required packages are installed."""
        # python 3rd party package dependency
        pip3_3ps_packages_test = {"Flask": "1.1.1"}
        pkg_validator = PkgV()
        pkg_validator.validate_pip3_pkgs(host=None,
                                         pkgs=pip3_3ps_packages_test,
                                         skip_version_check=False)

    def process(self):
        self.plan = self.args.plan[0]
        self.avoid_rmq = self.args.avoid_rmq

        # Take a backup of the sspl test config
        sspl_test_backup = '/etc/sspl_tests.conf.back'
        shutil.copyfile(sspl_test_file_path, sspl_test_backup)

        # Add the global config to the sspl_test config and revert the change
        # once the test completes. The global config path in sspl_tests.conf
        # is referred to by sspl_tests later.
        global_config_copy_url = Conf.get(
            SSPL_CONFIG_INDEX, "SYSTEM_INFORMATION>global_config_copy_url")
        Conf.copy(GLOBAL_CONFIG_INDEX, SSPL_TEST_CONFIG_INDEX)
        Conf.set(SSPL_CONFIG_INDEX,
                 "SYSTEM_INFORMATION>global_config_copy_url",
                 sspl_test_config_path)
        Conf.save(SSPL_CONFIG_INDEX)

        # Enable & disable sensors based on environment
        update_sensor_info(SSPL_TEST_CONFIG_INDEX)

        # Get rabbitmq values from sspl.conf and update sspl_tests.conf
        rmq_passwd = Conf.get(SSPL_CONFIG_INDEX,
                              "RABBITMQEGRESSPROCESSOR>password")
        Conf.set(SSPL_TEST_CONFIG_INDEX, "RABBITMQEGRESSPROCESSOR>password",
                 rmq_passwd)
        Conf.save(SSPL_TEST_CONFIG_INDEX)

        # TODO: Move lines 90-116 & 125-127 to RunQATest class
        # Create a dummy service and add its name in /etc/sspl.conf
        service_name = "dummy_service.service"
        service_file_path_src = f"{TEST_DIR}/alerts/os/dummy_service_files/dummy_service.service"
        service_executable_code_src = f"{TEST_DIR}/alerts/os/dummy_service_files/dummy_service.py"
        service_file_path_des = "/etc/systemd/system"
        service_executable_code_des = "/var/cortx/sspl/test"
        os.makedirs(service_executable_code_des, 0o777, exist_ok=True)
        shutil.copy(service_executable_code_src,
                    f'{service_executable_code_des}/dummy_service.py')
        # Make the service script executable.
        cmd = f"chmod +x {service_executable_code_des}/dummy_service.py"
        _, error, returncode = SimpleProcess(cmd).run()
        if returncode != 0:
            print("%s error occurred while executing cmd: %s" % (error, cmd))
            print("Failed to assign execute permission for dummy_service.py."
                  " dummy_service will fail.")
        # Copy the service file to the /etc/systemd/system/ path.
        shutil.copyfile(service_file_path_src,
                        f'{service_file_path_des}/dummy_service.service')
        cmd = "systemctl daemon-reload"
        _, error, returncode = SimpleProcess(cmd).run()
        if returncode != 0:
            print(f"Failed to execute '{cmd}', systemctl will be unable"
                  f" to manage the dummy_service.service \nError: {error}")
        self.dbus_service.enable(service_name)
        self.dbus_service.start(service_name)
        service_list = Conf.get(SSPL_CONFIG_INDEX,
                                "SERVICEMONITOR>monitored_services")
        service_list.append(service_name)
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>monitored_services",
                 service_list)
        threshold_inactive_time_original = Conf.get(
            SSPL_CONFIG_INDEX, "SERVICEMONITOR>threshold_inactive_time")
        threshold_inactive_time_new = 30
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>threshold_inactive_time",
                 threshold_inactive_time_new)
        Conf.save(SSPL_CONFIG_INDEX)

        # TODO: Convert shell script to python
        # from cortx.sspl.sspl_test.run_qa_test import RunQATest
        # RunQATest(self.plan, self.avoid_rmq).run()
        CMD = "%s/run_qa_test.sh %s %s" % (TEST_DIR, self.plan, self.avoid_rmq)
        output, error, returncode = SimpleProcess(CMD).run(
            realtime_output=True)

        # Restore the original path/file & service, then raise an exception
        # if the execution failed.
        service_list.remove(service_name)
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>monitored_services",
                 service_list)
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>threshold_inactive_time",
                 threshold_inactive_time_original)
        Conf.set(SSPL_CONFIG_INDEX,
                 "SYSTEM_INFORMATION>global_config_copy_url",
                 global_config_copy_url)
        Conf.save(SSPL_CONFIG_INDEX)
        shutil.copyfile(sspl_test_backup, sspl_test_file_path)
        self.dbus_service.restart('sspl-ll.service')
        if returncode != 0:
            raise SetupError(returncode, "%s - ERROR: %s - CMD %s",
                             self.name, error, CMD)
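# --- Illustrative usage sketch (not part of the original module) ---
# process() reads `args.plan[0]` and `args.avoid_rmq`, so it can be driven by
# any object exposing those attributes, e.g. an argparse namespace. The flag
# names below mirror the attributes consumed above; the actual CLI wiring
# lives elsewhere in the setup tooling and may differ.
#
#   import argparse
#
#   parser = argparse.ArgumentParser(description="Run SSPL tests")
#   parser.add_argument("--plan", nargs=1, default=["self_primary"])
#   parser.add_argument("--avoid_rmq", action="store_true")
#   args = parser.parse_args(["--plan", "sanity"])
#
#   cmd = SSPLTestCmd(args)
#   SSPLTestCmd.validate()   # check pip3 test dependencies (Flask)
#   cmd.process()            # runs run_qa_test.sh with the chosen plan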
class SSPLPostInstall:
    """Prepare the environment for the SSPL service."""

    name = "sspl_post_install"

    def __init__(self):
        """Initialize variables for post install."""
        consts.SSPL_LOG_PATH = "/var/log/%s/sspl/" % consts.PRODUCT_FAMILY
        consts.SSPL_BUNDLE_PATH = "/var/%s/sspl/bundle/" % consts.PRODUCT_FAMILY
        self.state_file = "%s/state.txt" % consts.DATA_PATH

    def validate(self):
        """Check that the below requirements are met by the setup.

        1. Check if the given product is supported by SSPL
        2. Check if the given setup is supported by SSPL
        3. Check if the required prerequisite software is installed
        4. Validate BMC connectivity
        5. Validate storage controller connectivity
        """
        machine_id = Utility.get_machine_id()

        # Validate input/provisioner configs
        self.product = Utility.get_config_value(consts.PRVSNR_CONFIG_INDEX,
                                                "cortx>release>product")
        self.setup = Utility.get_config_value(consts.PRVSNR_CONFIG_INDEX,
                                              "cortx>release>setup")
        node_type = Utility.get_config_value(
            consts.PRVSNR_CONFIG_INDEX, "server_node>%s>type" % machine_id)
        if node_type.lower() not in ["vm", "virtual"]:
            bmc_ip = Utility.get_config_value(
                consts.PRVSNR_CONFIG_INDEX,
                "server_node>%s>bmc>ip" % machine_id)
            enclosure_id = Utility.get_config_value(
                consts.PRVSNR_CONFIG_INDEX,
                "server_node>%s>storage>enclosure_id" % machine_id)
            Utility.get_config_value(
                consts.PRVSNR_CONFIG_INDEX,
                "storage_enclosure>%s>type" % enclosure_id)
            primary_ip = Utility.get_config_value(
                consts.PRVSNR_CONFIG_INDEX,
                "storage_enclosure>%s>controller>primary>ip" % enclosure_id)
            secondary_ip = Utility.get_config_value(
                consts.PRVSNR_CONFIG_INDEX,
                "storage_enclosure>%s>controller>secondary>ip" % enclosure_id)

        # Validate product support
        if self.product not in consts.enabled_products:
            msg = "Product '%s' is not in sspl supported product list: %s" % (
                self.product, consts.enabled_products)
            logger.error(msg)
            raise SetupError(errno.EINVAL, msg)

        # Validate setup support
        if self.setup not in consts.setups:
            msg = "Setup '%s' is not in sspl supported setup list: %s" % (
                self.setup, consts.setups)
            logger.error(msg)
            raise SetupError(errno.EINVAL, msg)

        # Validate that the required pip3 packages and rpms are installed
        self.validate_dependencies(self.setup)

        # Validate BMC & storage controller IP reachability
        if node_type.lower() not in ["vm", "virtual"]:
            # The cluster_id required for decrypting the secret is only
            # available from the prepare stage, so accessibility validation is
            # done there. At this point, validating IP reachability is enough.
            NetworkV().validate("connectivity",
                                [bmc_ip, primary_ip, secondary_ip])

    @staticmethod
    def validate_dependencies(setup):
        """Validate prerequisite software packages."""
        pip3_3ps_packages_main = {
            "cryptography": "2.8",
            "jsonschema": "3.2.0",
            "pika": "1.1.0",
            "pyinotify": "0.9.6",
            "python-daemon": "2.2.4",
            "requests": "2.25.1",
            "zope.component": "4.6.2",
            "zope.event": "4.5.0",
            "zope.interface": "5.2.0"
        }
        rpm_3ps_packages = {
            "hdparm": "9.43",
            "ipmitool": "1.8.18",
            "lshw": "B.02.18",
            "python3": "3.6.8",
            "python36-dbus": "1.2.4",
            "python36-gobject": "3.22.0",
            "python36-paramiko": "2.1.1",
            "python36-psutil": "5.6.7",
            "shadow-utils": "4.6",
            "smartmontools": "7.0",
            "systemd-python36": "1.0.0",
            "udisks2": "2.8.4"
        }
        ssu_dependency_rpms = [
            "sg3_utils",
            "gemhpi",
            "pull_sea_logs",
            "python-hpi",
            "zabbix-agent-lib",
            "zabbix-api-gescheit",
            "zabbix-xrtx-lib",
            "python-openhpi-baselib",
            "zabbix-collector"
        ]
        ssu_required_process = ["openhpid", "dcs-collectord"]
        vm_dependency_rpms = []

        pkg_validator = PkgV()
        pkg_validator.validate_pip3_pkgs(host=socket.getfqdn(),
                                         pkgs=pip3_3ps_packages_main,
                                         skip_version_check=False)
        pkg_validator.validate_rpm_pkgs(host=socket.getfqdn(),
                                        pkgs=rpm_3ps_packages,
                                        skip_version_check=False)
        # Check for SSPL required processes and misc dependencies if the
        # setup/role is other than cortx
        if setup == "ssu":
            pkg_validator.validate("rpms", ssu_dependency_rpms)
            ServiceV().validate("isrunning", ssu_required_process)
        elif setup == "vm" or setup == "gw" or setup == "cmu":
            # No dependency currently. Keeping this section as it
            # may be needed in future.
            pkg_validator.validate("rpms", vm_dependency_rpms)
            # No processes to check in a VM environment

    def process(self):
        """Create the SSPL user and the required config files."""
        # The dbus module import is implicit in cortx utils. Keeping this
        # import after dependency validation makes proper use of the
        # validate_dependencies() method.
        from cortx.utils.service import DbusServiceHandler
        self.dbus_service = DbusServiceHandler()

        # Create and load sspl config
        self.create_sspl_conf()
        Conf.load(consts.SSPL_CONFIG_INDEX, consts.sspl_config_path)
        # Update sspl.conf with the provisioner-supplied input config copy
        Conf.set(consts.SSPL_CONFIG_INDEX,
                 "SYSTEM_INFORMATION>global_config_copy_url",
                 consts.global_config_path)
        Conf.save(consts.SSPL_CONFIG_INDEX)

        self.create_user()
        self.create_directories_and_ownership()
        self.configure_sspl_syslog()
        self.install_sspl_service_files()
        self.enable_sspl_service()

    def create_sspl_conf(self):
        """Install the product-specific sspl config."""
        # Copy and load the product specific sspl config
        if not os.path.exists(consts.file_store_config_path):
            shutil.copyfile(
                "%s/conf/sspl.conf.%s.yaml" % (consts.SSPL_BASE_DIR,
                                               self.product),
                consts.file_store_config_path)

    def create_user(self):
        """Add the sspl-ll user and validate user creation."""
        os.system("/usr/sbin/useradd -r %s -s /sbin/nologin \
            -c 'User account to run the %s service'" % (consts.USER,
                                                        consts.USER))
        usernames = [x[0] for x in pwd.getpwall()]
        if consts.USER not in usernames:
            msg = "User %s doesn't exist. Please add the user." % (consts.USER)
            logger.error(msg)
            raise SetupError(errno.EINVAL, msg)
        # Add the sspl-ll user to required groups, the sudoers file, etc.
        sspl_reinit = "%s/low-level/framework/sspl_reinit" % consts.SSPL_BASE_DIR
        _, error, rc = SimpleProcess(sspl_reinit).run()
        if rc:
            msg = "%s failed with error: %s" % (sspl_reinit, error)
            logger.error(msg)
            raise SetupError(rc, msg)

    def create_directories_and_ownership(self):
        """Create the ras persistent cache directory and the state file.
        Assign ownership recursively on the configured directory. The created
        state file will be used later by the SSPL resource agent (HA).
        """
        # Extract the data path
        sspldp = Utility.get_config_value(consts.SSPL_CONFIG_INDEX,
                                          "SYSTEM_INFORMATION>data_path")
        if not sspldp:
            raise SetupError(errno.EINVAL, "Data path not set in sspl.conf")
        sspl_uid = Utility.get_uid(consts.USER)
        sspl_gid = Utility.get_gid(consts.USER)
        if sspl_uid == -1 or sspl_gid == -1:
            msg = "No user found with name : %s" % (consts.USER)
            logger.error(msg)
            raise SetupError(errno.EINVAL, msg)

        # Create the sspl data directory if it does not exist
        os.makedirs(sspldp, exist_ok=True)
        # Create the state file under the sspl data directory
        if not os.path.exists(self.state_file):
            file = open(self.state_file, "w")
            file.close()
        Utility.set_ownership_recursively(consts.SSPL_CONFIGURED_DIR,
                                          sspl_uid, sspl_gid)

        # Create SSPL log and bundle directories
        os.makedirs(consts.SSPL_LOG_PATH, exist_ok=True)
        os.makedirs(consts.SSPL_BUNDLE_PATH, exist_ok=True)
        # Create /tmp/dcs/hpi if required. Not required for the '<product>' role
        if self.setup != "cortx":
            os.makedirs(consts.HPI_PATH, mode=0o777, exist_ok=True)
            zabbix_uid = Utility.get_uid("zabbix")
            if zabbix_uid != -1:
                os.chown(consts.HPI_PATH, zabbix_uid, -1)
        # Create mdadm.conf to set an ACL on it.
        with open(consts.MDADM_PATH, 'a'):
            os.utime(consts.MDADM_PATH)
        os.chmod(consts.MDADM_PATH, mode=0o666)
        os.chown(consts.MDADM_PATH, sspl_uid, -1)

    def configure_sspl_syslog(self):
        """Configure log file path in rsyslog and update logrotate config file."""
        system_files_root = "%s/low-level/files" % consts.SSPL_BASE_DIR
        sspl_log_file_path = Utility.get_config_value(
            consts.SSPL_CONFIG_INDEX,
            "SYSTEM_INFORMATION>sspl_log_file_path")
        sspl_sb_log_file_path = sspl_log_file_path.replace(
            "/sspl.log", "/sspl_support_bundle.log")
        iem_log_file_path = Utility.get_config_value(
            consts.SSPL_CONFIG_INDEX, "IEMSENSOR>log_file_path")
        manifest_log_file_path = sspl_log_file_path.replace(
            "/sspl.log", "/manifest.log")
        setup_log_file_path = sspl_log_file_path.replace(
            "/sspl.log", "/sspl-setup.log")

        # IEM configuration
        os.makedirs("%s/iem/iec_mapping" % consts.PRODUCT_BASE_DIR,
                    exist_ok=True)
        distutils.dir_util.copy_tree(
            "%s/iec_mapping/" % system_files_root,
            "%s/iem/iec_mapping" % consts.PRODUCT_BASE_DIR)
        if not os.path.exists(consts.RSYSLOG_IEM_CONF):
            shutil.copyfile(
                "%s/%s" % (system_files_root, consts.RSYSLOG_IEM_CONF),
                consts.RSYSLOG_IEM_CONF)
        # Update log location as per sspl.conf
        Utility.replace_expr(consts.RSYSLOG_IEM_CONF, 'File.*[=,"]',
                             'File="%s"' % iem_log_file_path)

        # SSPL rsyslog configuration
        if not os.path.exists(consts.RSYSLOG_SSPL_CONF):
            shutil.copyfile(
                "%s/%s" % (system_files_root, consts.RSYSLOG_SSPL_CONF),
                consts.RSYSLOG_SSPL_CONF)
        # Update log location as per sspl.conf
        Utility.replace_expr(consts.RSYSLOG_SSPL_CONF, 'File.*[=,"]',
                             'File="%s"' % sspl_log_file_path)

        # Manifest bundle log configuration
        if not os.path.exists(consts.RSYSLOG_MSB_CONF):
            shutil.copyfile(
                "%s/%s" % (system_files_root, consts.RSYSLOG_MSB_CONF),
                consts.RSYSLOG_MSB_CONF)
        # Update log location as per sspl.conf
        Utility.replace_expr(consts.RSYSLOG_MSB_CONF, 'File.*[=,"]',
                             'File="%s"' % manifest_log_file_path)

        # Support bundle log configuration
        if not os.path.exists(consts.RSYSLOG_SB_CONF):
            shutil.copyfile(
                "%s/%s" % (system_files_root, consts.RSYSLOG_SB_CONF),
                consts.RSYSLOG_SB_CONF)
        # Update log location as per sspl.conf
        Utility.replace_expr(consts.RSYSLOG_SB_CONF, 'File.*[=,"]',
                             'File="%s"' % sspl_sb_log_file_path)

        # SSPL setup log configuration
        if not os.path.exists(consts.RSYSLOG_SETUP_CONF):
            shutil.copyfile(
                "%s/%s" % (system_files_root, consts.RSYSLOG_SETUP_CONF),
                consts.RSYSLOG_SETUP_CONF)
        # Update log location as per sspl.conf
        Utility.replace_expr(consts.RSYSLOG_SETUP_CONF, 'File.*[=,"]',
                             'File="%s"' % setup_log_file_path)

        # Configure logrotate
        # Create logrotate dir in case it's not present
        os.makedirs(consts.LOGROTATE_DIR, exist_ok=True)
        Utility.replace_expr(
            "%s/etc/logrotate.d/iem_messages" % system_files_root,
            0, iem_log_file_path)
        Utility.replace_expr(
            "%s/etc/logrotate.d/sspl_logs" % system_files_root,
            0, sspl_log_file_path)
        Utility.replace_expr(
            "%s/etc/logrotate.d/sspl_sb_logs" % system_files_root,
            0, sspl_sb_log_file_path)
        Utility.replace_expr(
            "%s/etc/logrotate.d/sspl_setup_logs" % system_files_root,
            0, setup_log_file_path)
        shutil.copy2("%s/etc/logrotate.d/iem_messages" % system_files_root,
                     consts.IEM_LOGROTATE_CONF)
        shutil.copy2("%s/etc/logrotate.d/sspl_logs" % system_files_root,
                     consts.SSPL_LOGROTATE_CONF)
        shutil.copy2("%s/etc/logrotate.d/manifest_logs" % system_files_root,
                     consts.MSB_LOGROTATE_CONF)
        shutil.copy2("%s/etc/logrotate.d/sspl_sb_logs" % system_files_root,
                     consts.SB_LOGROTATE_CONF)
        shutil.copy2("%s/etc/logrotate.d/sspl_setup_logs" % system_files_root,
                     consts.SETUP_LOGROTATE_CONF)
        # This rsyslog restart happens after the rsyslog conf files are
        # updated and before SSPL starts. If it is removed from here, SSPL's
        # initial logs may not appear in "/var/log/<product>/sspl/sspl.log"
        # and would have to be collected from "/var/log/messages" instead.
        self.dbus_service.restart('rsyslog.service')

    def install_sspl_service_files(self):
        """Copy the service file to the systemd location based on product."""
        # Create a soft link from the SINGLE/DUAL product name service to the
        # existing LDR_R1 service, and from CLUSTER to the LR2 service,
        # instead of keeping separate service files with the same content.
        currentProduct = "%s/conf/sspl-ll.service.%s" % (consts.SSPL_BASE_DIR,
                                                         self.product)
        if self.product in ("SINGLE", "DUAL") and \
                not os.path.exists(currentProduct):
            os.symlink(
                "%s/conf/sspl-ll.service.LDR_R1" % (consts.SSPL_BASE_DIR),
                currentProduct)
        if self.product == "CLUSTER" and not os.path.exists(currentProduct):
            os.symlink("%s/conf/sspl-ll.service.LR2" % (consts.SSPL_BASE_DIR),
                       currentProduct)
        shutil.copyfile(currentProduct, "/etc/systemd/system/sspl-ll.service")

    def enable_sspl_service(self):
        """Enable the sspl-ll service."""
        self.dbus_service.enable("sspl-ll.service")
        daemon_reload_cmd = "systemctl daemon-reload"
        output, error, rc = SimpleProcess(daemon_reload_cmd).run()
        if rc != 0:
            logger.error(f"Failed to enable sspl service. ERROR: {error}")
            raise SetupError(rc, error, daemon_reload_cmd)
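# --- Illustrative usage sketch (not part of the original module) ---
# The post-install mini-provisioner stage is normally driven by the setup
# CLI rather than called directly. A minimal, hypothetical invocation that
# matches the methods defined above would be:
#
#   post_install = SSPLPostInstall()
#   post_install.validate()   # product/setup checks, package and BMC checks
#   post_install.process()    # user, directories, syslog, service files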
def config_sspl(self):
    if (os.geteuid() != 0):
        raise SetupError(errno.EINVAL,
                         "Run this command with root privileges!!")
    if not os.path.isfile(consts.file_store_config_path):
        raise SetupError(errno.EINVAL,
                         "Missing configuration!! Create and rerun.",
                         consts.file_store_config_path)

    # Put minion id, consul_host and consul_port in the conf file.
    # From LDR_R2 onward, salt will be abstracted out and will no longer
    # exist as a hard dependency of SSPL.
    if consts.PRODUCT_NAME == "LDR_R1":
        from framework.utils.salt_util import SaltInterface
        salt_util = SaltInterface()
        salt_util.update_config_file(consts.file_store_config_path)

    if os.path.isfile(consts.SSPL_CONFIGURED):
        os.remove(consts.SSPL_CONFIGURED)

    # Add the sspl-ll user to required groups, the sudoers file, etc.
    sspl_reinit = [
        f"{consts.SSPL_BASE_DIR}/low-level/framework/sspl_reinit",
        self.product
    ]
    _, error, returncode = SimpleProcess(sspl_reinit).run()
    if returncode:
        raise SetupError(
            returncode,
            "%s/low-level/framework/sspl_reinit failed for "
            "product %s with error: %s",
            consts.SSPL_BASE_DIR, self.product, error)

    os.makedirs(consts.SSPL_CONFIGURED_DIR, exist_ok=True)
    with open(consts.SSPL_CONFIGURED, 'a'):
        os.utime(consts.SSPL_CONFIGURED)

    # SSPL log file configuration
    # SSPL_LOG_FILE_PATH = self.getval_from_ssplconf('sspl_log_file_path')
    SSPL_LOG_FILE_PATH = Conf.get(consts.SSPL_CONFIG_INDEX,
                                  'SYSTEM_INFORMATION>sspl_log_file_path')
    IEM_LOG_FILE_PATH = Conf.get(consts.SSPL_CONFIG_INDEX,
                                 'IEMSENSOR>log_file_path')
    if SSPL_LOG_FILE_PATH:
        self.replace_expr(consts.RSYSLOG_SSPL_CONF, 'File.*[=,"]',
                          'File="%s"' % SSPL_LOG_FILE_PATH)
        self.replace_expr(
            f"{consts.SSPL_BASE_DIR}/low-level/files/etc/logrotate.d/sspl_logs",
            0, SSPL_LOG_FILE_PATH)

    # IEM configuration
    # Configure the log file path in the rsyslog and logrotate configuration
    # files
    IEM_LOG_FILE_PATH = Conf.get(consts.SSPL_CONFIG_INDEX,
                                 'IEMSENSOR>log_file_path')
    if IEM_LOG_FILE_PATH:
        self.replace_expr(consts.RSYSLOG_IEM_CONF, 'File.*[=,"]',
                          'File="%s"' % IEM_LOG_FILE_PATH)
        self.replace_expr(
            f'{consts.SSPL_BASE_DIR}/low-level/files/etc/logrotate.d/iem_messages',
            0, IEM_LOG_FILE_PATH)
    else:
        self.replace_expr(
            consts.RSYSLOG_IEM_CONF, 'File.*[=,"]',
            'File="/var/log/%s/iem/iem_messages"' % consts.PRODUCT_FAMILY)

    # Create the logrotate dir in case it's not present (dev environment)
    if not os.path.exists(consts.LOGROTATE_DIR):
        os.makedirs(consts.LOGROTATE_DIR)
    shutil.copy2(
        '%s/low-level/files/etc/logrotate.d/iem_messages' % consts.SSPL_BASE_DIR,
        consts.IEM_LOGROTATE_CONF)
    shutil.copy2(
        '%s/low-level/files/etc/logrotate.d/sspl_logs' % consts.SSPL_BASE_DIR,
        consts.SSPL_LOGROTATE_CONF)

    # This rsyslog restart happens after the rsyslog conf files are updated
    # and before SSPL starts. If it is removed from here, SSPL's initial logs
    # may not appear in "/var/log/<product>/sspl/sspl.log" and would have to
    # be collected from "/var/log/messages" instead.
    service = DbusServiceHandler()
    service.restart('rsyslog.service')

    # In the node replacement scenario consul will not be running on the new
    # node, but there will be two instances of consul running on the healthy
    # node. When the new node is configured, consul is brought back up on it.
    # We use the VIP to connect to consul, so if consul is not running on the
    # new node we don't need to error out.
    # If consul is not running, exit.
    # From LDR_R2 onward, consul will be abstracted out and will no longer
    # exist as a hard dependency of SSPL.
    if consts.PRODUCT_NAME == 'LDR_R1':
        if not os.path.exists(consts.REPLACEMENT_NODE_ENV_VAR_FILE):
            ServiceV().validate('isrunning', ['consul'], is_process=True)

    # Get the types of server and storage we are currently running on and
    # enable/disable sensor groups in the conf file accordingly.
    update_sensor_info(consts.SSPL_CONFIG_INDEX)

    self.create_message_types()
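# --- Illustrative sketch (not from the original source) ---
# ServiceV().validate('isrunning', ['consul'], is_process=True) comes from
# cortx.utils; its internals are not shown here. A rough equivalent of a
# "process is running" check, using psutil (already listed in the rpm
# dependencies above), would be:
#
#   import psutil
#
#   def is_process_running(name: str) -> bool:
#       """Return True if any running process name contains `name`."""
#       return any(name in (p.info.get("name") or "")
#                  for p in psutil.process_iter(["name"]))
#
#   if not is_process_running("consul"):
#       raise SetupError(errno.EINVAL, "consul is not running")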