def invoke(self, shell: bool = False, cwd: Optional[str] = None) -> \
        List[Tuple[List, bytes, bytes]]:
    """Run the certificate-generation commands, then lock down and register the generated files.

    Args:
        shell: Passed through to the parent invoke; whether to run commands via a shell
        cwd: Working directory for the commands; defaults to the certificate directory
    Returns:
        Whatever the parent class' invoke returns (a list of (command, stdout, stderr) tuples)
    """
    utilities.makedirs(f'{self.cert_directory}')
    if not cwd:
        cwd = self.cert_directory
    results = super().invoke(shell=shell, cwd=cwd)
    # Remove temporary admin artifacts left behind by the generation commands
    utilities.safely_remove_file(f'{self.cert_directory}/admin-key-temp.pem')
    utilities.safely_remove_file(f'{self.cert_directory}/admin.csr')
    utilities.set_ownership_of_file(path=self.cert_directory, user='******', group='dynamite')
    # Restrict the directory to its owner; key/cert material becomes owner read/write only.
    # NOTE(review): permissions are passed as plain integers (700/600) — confirm
    # utilities.set_permissions_of_file expects this form rather than octal (0o700).
    utilities.set_permissions_of_file(file_path=self.cert_directory, unix_permissions_integer=700)
    utilities.set_permissions_of_file(file_path=f'{self.cert_directory}/{self.cert_name}',
                                      unix_permissions_integer=600)
    utilities.set_permissions_of_file(file_path=f'{self.cert_directory}/{self.key_name}',
                                      unix_permissions_integer=600)
    utilities.set_permissions_of_file(file_path=f'{self.cert_directory}/{self.trusted_ca_cert_name}',
                                      unix_permissions_integer=600)
    utilities.set_permissions_of_file(file_path=f'{self.cert_directory}/{self.trusted_ca_key_name}',
                                      unix_permissions_integer=600)
    # Point both the transport and REST API TLS settings at the newly generated material
    es_main_config = config.ConfigManager(self.configuration_directory)
    es_main_config.transport_pem_cert_file = f'security/auth/{self.cert_name}'
    es_main_config.rest_api_pem_cert_file = es_main_config.transport_pem_cert_file
    es_main_config.transport_pem_key_file = f'security/auth/{self.key_name}'
    es_main_config.rest_api_pem_key_file = es_main_config.transport_pem_key_file
    es_main_config.transport_trusted_cas_file = f'security/auth/{self.trusted_ca_cert_name}'
    es_main_config.rest_api_trusted_cas_file = es_main_config.transport_trusted_cas_file
    es_main_config.commit()
    return results
def setup(self):
    """Create the install directories, copy the Java files, register env vars, and hand ownership over."""
    java_root = f'{self.install_directory}/{self.directory_name}'
    # Parent first, then the versioned sub-directory
    for path in (self.install_directory, java_root):
        utilities.makedirs(path)
    self.copy_java_files_and_directories()
    self.create_update_java_environment_variables()
    utilities.set_ownership_of_file(java_root, user='******', group='dynamite')
def setup_logstash_elastiflow(self, stdout=False):
    """Install ElastiFlow configurations for Logstash and register its environment variables.

    Args:
        stdout: Print progress messages to console
    """
    env_file = '/etc/dynamite/environment'
    if stdout:
        sys.stdout.write('[+] Creating elastiflow install|configuration directories.\n')
    subprocess.call('mkdir -p {}'.format(self.install_directory), shell=True)
    if stdout:
        sys.stdout.write('[+] Copying elastiflow configurations\n')
    utilities.copytree(os.path.join(const.DEFAULT_CONFIGS, 'logstash', 'zeek'),
                       self.install_directory)
    utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
    # FIX: read the environment file once and close it — the original called
    # open(...).read() four separate times, leaking a file handle on each call.
    with open(env_file) as env_f:
        env_str = env_f.read()
    # (env var name, sub-directory, human-readable label for the log message)
    env_paths = [
        ('ELASTIFLOW_DICT_PATH', 'dictionaries', 'dictionary'),
        ('ELASTIFLOW_TEMPLATE_PATH', 'templates', 'template'),
        ('ELASTIFLOW_GEOIP_DB_PATH', 'geoipdbs', 'geodb'),
        ('ELASTIFLOW_DEFINITION_PATH', 'definitions', 'definitions'),
    ]
    for var_name, sub_directory, label in env_paths:
        # Append each ElastiFlow path variable only if it is not already present
        if var_name not in env_str:
            var_path = os.path.join(self.install_directory, sub_directory)
            if stdout:
                sys.stdout.write('[+] Updating Elastiflow {} configuration path [{}]\n'.format(label, var_path))
            subprocess.call('echo {}="{}" >> {}'.format(var_name, var_path, env_file), shell=True)
    ElastiflowConfigurator().write_environment_variables()
def start(self, stdout=False):
    """
    Start the Kibana process

    :param stdout: Print output to console
    :return: True, if started successfully
    """

    def start_shell_out():
        # We use su instead of runuser here because of nodes' weird dependency on PAM
        # when calling from within a sub-shell
        subprocess.call('su -l dynamite -c "{}/bin/kibana -c {} -l {} & > /dev/null &"'.format(
            self.config.kibana_home,
            os.path.join(self.config.kibana_path_conf, 'kibana.yml'),
            os.path.join(self.config.kibana_logs, 'kibana.log')),
            shell=True, env=utilities.get_environment_file_dict())

    if not os.path.exists('/var/run/dynamite/kibana/'):
        subprocess.call('mkdir -p {}'.format('/var/run/dynamite/kibana/'), shell=True)
        utilities.set_ownership_of_file('/var/run/dynamite', user='******', group='dynamite')
    if not utilities.check_pid(self.pid):
        # Detach the blocking shell-out into its own process
        Process(target=start_shell_out).start()
    else:
        sys.stderr.write('[-] Kibana is already running on PID [{}]\n'.format(self.pid))
        return True
    retry = 0
    self.pid = -1
    time.sleep(5)
    # Poll the PID file for up to 6 attempts; each attempt re-reads the PID and
    # checks the process is actually alive.
    while retry < 6:
        # Pre-computed so the IOError handler below can still report this attempt
        start_message = '[+] [Attempt: {}] Starting Kibana on PID [{}]\n'.format(retry + 1, self.pid)
        try:
            with open('/var/run/dynamite/kibana/kibana.pid') as f:
                self.pid = int(f.read())
            start_message = '[+] [Attempt: {}] Starting Kibana on PID [{}]\n'.format(retry + 1, self.pid)
            if stdout:
                sys.stdout.write(start_message)
            if not utilities.check_pid(self.pid):
                retry += 1
                time.sleep(5)
            else:
                return True
        except IOError:
            # PID file not written yet — report the attempt and retry
            if stdout:
                sys.stdout.write(start_message)
            retry += 1
            time.sleep(3)
    return False
def start(self, stdout=False):
    """
    Start the ElasticSearch process

    :param stdout: Print output to console
    :return: True, if started successfully
    """

    def start_shell_out():
        # The environment file contents are injected into the runuser shell command line
        subprocess.call('runuser -l dynamite -c "{} {}/bin/elasticsearch '
                        '-p /var/run/dynamite/elasticsearch/elasticsearch.pid --quiet &>/dev/null &"'.format(
                            utilities.get_environment_file_str(), self.config.es_home), shell=True)

    if not os.path.exists('/var/run/dynamite/elasticsearch/'):
        subprocess.call('mkdir -p {}'.format('/var/run/dynamite/elasticsearch/'), shell=True)
        utilities.set_ownership_of_file('/var/run/dynamite', user='******', group='dynamite')
    if not utilities.check_pid(self.pid):
        # Detach the blocking shell-out into its own process
        Process(target=start_shell_out).start()
    else:
        sys.stderr.write('[-] ElasticSearch is already running on PID [{}]\n'.format(self.pid))
        return True
    retry = 0
    self.pid = -1
    time.sleep(5)
    # Poll the PID file for up to 6 attempts; each attempt re-reads the PID and
    # checks the process is actually alive.
    while retry < 6:
        # Pre-computed so the IOError handler below can still report this attempt
        start_message = '[+] [Attempt: {}] Starting ElasticSearch on PID [{}]\n'.format(retry + 1, self.pid)
        try:
            with open('/var/run/dynamite/elasticsearch/elasticsearch.pid') as f:
                self.pid = int(f.read())
            start_message = '[+] [Attempt: {}] Starting ElasticSearch on PID [{}]\n'.format(retry + 1, self.pid)
            if stdout:
                sys.stdout.write(start_message)
            if not utilities.check_pid(self.pid):
                retry += 1
                time.sleep(5)
            else:
                return True
        except IOError:
            # PID file not written yet — report the attempt and retry
            if stdout:
                sys.stdout.write(start_message)
            retry += 1
            time.sleep(3)
    return False
def _install_kibana_objects(self):
    """Install Dynamite's Kibana dashboards/visualizations, starting ElasticSearch/Kibana as needed.

    Raises:
        kibana_exceptions.InstallKibanaError: If Kibana cannot be started, the Kibana API never
            comes up, or the saved objects cannot be created after repeated attempts.
    """
    self.logger.info('Installing Kibana Dashboards')
    self.logger.info('Waiting for ElasticSearch to become accessible.')
    # Start ElasticSearch if it is installed locally and is not running
    if self.elasticsearch_host in ['localhost', '127.0.0.1', '0.0.0.0', '::1', '::/128']:
        self.logger.info('Starting ElasticSearch.')
        elastic_process.ProcessManager().start()
        # Block until the ElasticSearch API answers
        while not elastic_profile.ProcessProfiler().is_listening():
            self.logger.info('Waiting for ElasticSearch API to become accessible.')
            time.sleep(5)
        self.logger.info('ElasticSearch API is up.')
        self.logger.info('Sleeping for 5 seconds, while ElasticSearch API finishes booting.')
        time.sleep(5)
    try:
        kibana_proc = kibana_process.ProcessManager()
        # Run the Kibana optimizer before first start
        kibana_proc.optimize()
        utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
        utilities.set_ownership_of_file(self.configuration_directory, user='******', group='dynamite')
        time.sleep(5)
        self.logger.info('Starting Kibana.')
        kibana_proc.start()
    except Exception as e:
        raise kibana_exceptions.InstallKibanaError("General error while starting Kibana process; {}".format(e))
    # Poll the Kibana API, giving up after 5 attempts
    kibana_api_start_attempts = 0
    while not kibana_profile.ProcessProfiler().is_listening() and kibana_api_start_attempts != 5:
        self.logger.info('Waiting for Kibana API to become accessible.')
        kibana_api_start_attempts += 1
        time.sleep(5)
    if kibana_api_start_attempts == 5:
        self.logger.error('Kibana API could not be started after {} attempts.'.format(kibana_api_start_attempts))
        raise kibana_exceptions.InstallKibanaError(
            "Kibana API could not be started after {} attempts.".format(kibana_api_start_attempts))
    self.logger.info('Kibana API is up.')
    self.logger.info('Sleeping for 10 seconds, while Kibana API finishes booting.')
    time.sleep(10)
    api_config = kibana_configs.ApiConfigManager(self.configuration_directory)
    # Object creation is retried; the counter starting at 1 stops the loop at 5
    kibana_object_create_attempts = 1
    while kibana_object_create_attempts != 5:
        try:
            self.logger.info('[Attempt {}] Attempting to install dashboards/visualizations.'.format(
                kibana_object_create_attempts))
            api_config.create_dynamite_kibana_objects()
            break
        except kibana_exceptions.CreateKibanaObjectsError:
            kibana_object_create_attempts += 1
            time.sleep(10)
    if kibana_object_create_attempts == 5:
        self.logger.error(
            "Kibana objects could not be created after {} attempts".format(kibana_object_create_attempts))
        raise kibana_exceptions.InstallKibanaError(
            "Kibana objects could not be created after {} attempts".format(kibana_object_create_attempts))
    self.logger.info('Successfully created dashboards/visualizations.')
    # Stop the Kibana process started above now that the objects are installed
    kibana_proc.stop()
def __init__(self):
    """Resolve the Logstash configuration from the environment file and discover the current PID."""
    self.environment_variables = utilities.get_environment_file_dict()
    self.configuration_directory = self.environment_variables.get('LS_PATH_CONF')
    self.config = LogstashConfigurator(self.configuration_directory)
    if not os.path.exists('/var/run/dynamite/logstash/'):
        subprocess.call('mkdir -p {}'.format('/var/run/dynamite/logstash/'), shell=True)
        utilities.set_ownership_of_file('/var/run/dynamite')
    try:
        # FIX: use a context manager so the PID file handle is closed — the original
        # open(...).read() leaked the descriptor until garbage collection.
        with open('/var/run/dynamite/logstash/logstash.pid') as pid_f:
            # NOTE(review): the stored value is offset by one; presumably the pid file
            # records a parent/wrapper process — confirm before changing.
            self.pid = int(pid_f.read()) + 1
    except (IOError, ValueError):
        # Missing or malformed PID file -> treat as not running
        self.pid = -1
def setup(self, inspect_interfaces: List[str]):
    """Setup Zeek

    Args:
        inspect_interfaces: A list of network interfaces to capture on (E.G ["mon0", "mon1"])
    Returns:
        None
    Raises:
        install.NetworkInterfaceNotFound: If interface validation is enabled and fails.
    """
    if not self.skip_interface_validation:
        if not self.validate_inspect_interfaces(inspect_interfaces):
            raise install.NetworkInterfaceNotFound(inspect_interfaces)
    sysctl = systemctl.SystemCtl()
    self.install_zeek_dependencies()
    self.create_update_zeek_environment_variables()
    self.logger.debug(f'Creating directory: {self.configuration_directory}')
    utilities.makedirs(self.configuration_directory)
    self.logger.debug(f'Creating directory: {self.install_directory}')
    utilities.makedirs(self.install_directory)
    self.logger.info('Setting up Zeek from source. This can take up to 15 minutes.')
    if self.stdout:
        utilities.print_coffee_art()
    self.configure_compile_zeek()
    self.logger.info('Setting up Zeek package manager.')
    zkg_installer = zkg_install.InstallManager()
    zkg_installer.setup()
    package.InstallPackageManager(const.ZEEK_PACKAGES, stdout=self.stdout, verbose=self.verbose).setup()
    # Overwrite upstream defaults with Dynamite's node/site configurations
    self.copy_file_or_directory_to_destination(f'{const.DEFAULT_CONFIGS}/zeek/broctl-nodes.cfg',
                                               f'{self.install_directory}/etc/node.cfg')
    self.copy_file_or_directory_to_destination(f'{const.DEFAULT_CONFIGS}/zeek/local.zeek',
                                               f'{self.configuration_directory}/site/local.zeek')
    # Optimize Configurations
    # NOTE(review): site_local_config is constructed but never used below — confirm
    # whether the constructor has side effects before removing it.
    site_local_config = config.SiteLocalConfigManager(self.configuration_directory, stdout=self.stdout,
                                                      verbose=self.verbose)
    node_config = config.NodeConfigManager(self.install_directory, stdout=self.stdout, verbose=self.verbose)
    # Replace any existing worker definitions with an optimal set for the chosen interfaces
    node_config.workers = node.Workers()
    for worker in node_config.get_optimal_zeek_worker_config(inspect_interfaces):
        node_config.workers.add_worker(
            worker=worker
        )
    self.logger.info('Applying node configuration.')
    node_config.commit()
    # Fix Permissions
    self.logger.info('Setting up file permissions.')
    utilities.set_ownership_of_file(self.configuration_directory, user='******', group='dynamite')
    utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
    self.logger.info(f'Installing service -> {const.DEFAULT_CONFIGS}/systemd/zeek.service')
    sysctl.install_and_enable(os.path.join(const.DEFAULT_CONFIGS, 'systemd', 'zeek.service'))
def optimize(self, stdout=False):
    """Run Kibana's optimizer and restore log-directory ownership afterwards.

    :param stdout: Print output to console
    """
    if not os.path.exists('/var/run/dynamite/kibana/'):
        subprocess.call('mkdir -p {}'.format('/var/run/dynamite/kibana/'), shell=True)
        utilities.set_ownership_of_file('/var/run/dynamite', user='******', group='dynamite')
    if stdout:
        sys.stdout.write('[+] Optimizing Kibana Libraries.\n')
    # Kibana initially has to be called as root due to a process forking issue when using runuser
    # builtin
    subprocess.call('{}/bin/kibana --optimize --allow-root'.format(
        self.config.kibana_home,
    ), shell=True, env=utilities.get_environment_file_dict())
    # Pass permissions back to dynamite user
    utilities.set_ownership_of_file('/var/log/dynamite', user='******', group='dynamite')
def setup_logstash_synesis(self, stdout=False):
    """Install Synesis (Suricata) configurations for Logstash and register its environment variables.

    Args:
        stdout: Print progress messages to console
    """
    env_file = '/etc/dynamite/environment'
    if stdout:
        sys.stdout.write('[+] Creating synesis install|configuration directories.\n')
    subprocess.call('mkdir -p {}'.format(self.install_directory), shell=True)
    if stdout:
        sys.stdout.write('[+] Copying synesis configurations\n')
    utilities.copytree(os.path.join(const.DEFAULT_CONFIGS, 'logstash', 'suricata'),
                       self.install_directory)
    utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
    # FIX: read the environment file once and close it — the original called
    # open(...).read() three separate times, leaking a file handle on each call.
    with open(env_file) as env_f:
        env_str = env_f.read()
    # (env var name, sub-directory, human-readable label for the log message)
    env_paths = [
        ('SYNLITE_SURICATA_DICT_PATH', 'dictionaries', 'dictionary'),
        ('SYNLITE_SURICATA_TEMPLATE_PATH', 'templates', 'template'),
        ('SYNLITE_SURICATA_GEOIP_DB_PATH', 'geoipdbs', 'geodb'),
    ]
    for var_name, sub_directory, label in env_paths:
        # Append each Synesis path variable only if it is not already present
        if var_name not in env_str:
            var_path = os.path.join(self.install_directory, sub_directory)
            if stdout:
                sys.stdout.write('[+] Updating Synesis {} configuration path [{}]\n'.format(label, var_path))
            subprocess.call('echo {}="{}" >> {}'.format(var_name, var_path, env_file), shell=True)
    SynesisConfigurator().write_environment_variables()
def _install_kibana_objects(self):
    """Install the ElastiFlow saved objects (dashboards/visualizations) into Kibana.

    Starts ElasticSearch locally when required, optimizes and starts Kibana, waits for both
    APIs to come up, then creates the saved objects with a bounded number of retries.
    """
    if KibanaProfiler().is_installed and (ElasticProfiler().is_installed or self.elasticsearch_host != 'localhost'):
        if self.stdout:
            sys.stdout.write('[+] Installing Kibana Dashboards\n')
            sys.stdout.write('[+] Waiting for ElasticSearch to become accessible.\n')
        # Start ElasticSearch if it is installed locally and is not running
        if self.elasticsearch_host in ['localhost', '127.0.0.1', '0.0.0.0', '::1', '::/128']:
            sys.stdout.write('[+] Starting ElasticSearch.\n')
            ElasticProcess().start(stdout=self.stdout)
            sys.stdout.flush()
            while not ElasticProfiler().is_listening:
                if self.stdout:
                    sys.stdout.write('[+] Waiting for ElasticSearch API to become accessible.\n')
                time.sleep(5)
            if self.stdout:
                sys.stdout.write('[+] ElasticSearch API is up.\n')
                sys.stdout.write('[+] Sleeping for 10 seconds, while ElasticSearch API finishes booting.\n')
                sys.stdout.flush()
            time.sleep(10)
        kibana_process = KibanaProcess()
        kibana_process.optimize(stdout=self.stdout)
        utilities.set_ownership_of_file('/opt/dynamite/', user='******', group='dynamite')
        utilities.set_ownership_of_file('/etc/dynamite/', user='******', group='dynamite')
        time.sleep(5)
        sys.stdout.write('[+] Starting Kibana.\n')
        kibana_process.start(stdout=self.stdout)
        while not KibanaProfiler().is_listening:
            if self.stdout:
                sys.stdout.write('[+] Waiting for Kibana API to become accessible.\n')
            time.sleep(5)
        if self.stdout:
            sys.stdout.write('[+] Kibana API is up.\n')
            sys.stdout.write('[+] Sleeping for 15 seconds, while Kibana API finishes booting.\n')
            sys.stdout.flush()
        time.sleep(15)
        api_config = KibanaAPIConfigurator(self.configuration_directory)
        # FIX: the original loop retried forever when saved-object creation kept failing —
        # the attempt counter was incremented but never consulted. Cap the retries so a
        # persistently unavailable API cannot hang the installer.
        kibana_object_create_attempts = 1
        created = api_config.create_elastiflow_saved_objects()
        while not created and kibana_object_create_attempts < 5:
            if self.stdout:
                sys.stdout.write('[+] Attempting to dashboards/visualizations [Attempt {}]\n'.format(
                    kibana_object_create_attempts))
            kibana_object_create_attempts += 1
            time.sleep(10)
            created = api_config.create_elastiflow_saved_objects()
        if not created:
            sys.stderr.write('[-] Failed to create dashboards/visualizations after {} attempts\n'.format(
                kibana_object_create_attempts))
        elif self.stdout:
            sys.stdout.write('[+] Successfully created dashboards/visualizations.\n')
        kibana_process.stop()
def _get_pid(pid_file):
    """Return the integer PID stored in pid_file, or None when it cannot be read or parsed."""
    pid_dir = os.path.dirname(pid_file)
    utilities.makedirs(pid_dir, exist_ok=True)
    try:
        utilities.set_ownership_of_file(pid_dir)
    except IOError:
        # PID file does not exist
        pass
    except KeyError:
        # dynamite user does not exist
        pass
    try:
        with open(pid_file) as pid_fh:
            return int(pid_fh.read())
    except (IOError, ValueError):
        # Missing or malformed PID file
        return None
def __init__(self, configuration_directory=CONFIGURATION_DIRECTORY):
    """
    :param configuration_directory: Path to the configuration directory (E.G /etc/dynamite/logstash/)
    """
    self.configuration_directory = configuration_directory
    self.config = LogstashConfigurator(self.configuration_directory)
    if not os.path.exists('/var/run/dynamite/logstash/'):
        subprocess.call(
            'mkdir -p {}'.format('/var/run/dynamite/logstash/'), shell=True)
        utilities.set_ownership_of_file('/var/run/dynamite')
    try:
        # FIX: use a context manager so the PID file handle is closed — the original
        # open(...).read() leaked the descriptor until garbage collection.
        with open('/var/run/dynamite/logstash/logstash.pid') as pid_f:
            # NOTE(review): the stored value is offset by one; presumably the pid file
            # records a parent/wrapper process — confirm before changing.
            self.pid = int(pid_f.read()) + 1
    except (IOError, ValueError):
        # Missing or malformed PID file -> treat as not running
        self.pid = -1
def __init__(self, stdout=True, verbose=False):
    """Initialise the lab process manager: logger, environment lookup, and current JupyterHub PID."""
    log_level = logging.DEBUG if verbose else logging.INFO
    self.logger = get_logger('LAB', level=log_level, stdout=stdout)
    self.environment_variables = utilities.get_environment_file_dict()
    self.configuration_directory = self.environment_variables.get('DYNAMITE_LAB_CONFIG')
    utilities.makedirs(PID_DIRECTORY, exist_ok=True)
    utilities.set_ownership_of_file(PID_DIRECTORY, user='******', group='dynamite')
    pid_file = os.path.join(PID_DIRECTORY, 'jupyterhub.pid')
    try:
        with open(pid_file) as handle:
            self.pid = int(handle.read())
    except (IOError, ValueError):
        # Missing or malformed PID file -> treat the process as not running
        self.pid = -1
def __init__(self, name: str, verbose: Optional[bool] = False, stdout: Optional[bool] = True,
             log_level=logging.INFO):
    """
    Build a custom service installer

    Args:
        name: The name of the service
        stdout: Print output to console
        verbose: Include detailed debug messages
        log_level: The minimum logging.LOG_LEVEL to be handled
    """
    self.stdout = stdout
    self.verbose = verbose
    # Verbose mode always wins over the caller-supplied log level
    effective_level = logging.DEBUG if verbose else log_level
    self.logger = get_logger(str(name).upper(), level=effective_level, stdout=stdout)
    self.dynamite_environ = utilities.get_environment_file_dict()
    utilities.create_dynamite_user()
    utilities.makedirs(const.PID_PATH, exist_ok=True)
    utilities.set_ownership_of_file(const.PID_PATH, user='******', group='dynamite')
def setup_logstash(self):
    """
    Create required directories, files, and variables to run LogStash successfully;
    """
    self._create_logstash_directories()
    self._copy_logstash_files_and_directories()
    self._create_logstash_environment_variables()
    self._setup_default_logstash_configs()
    self._update_sysctl()
    self._setup_elastiflow()
    self._setup_synesis()
    self._install_logstash_plugins()
    # Copy the default pipelines.yml into the configuration directory
    shutil.copy(os.path.join(const.DEFAULT_CONFIGS, 'logstash', 'pipelines.yml'),
                os.path.join(self.configuration_directory, 'pipelines.yml'))
    # Hand the Dynamite trees over to the service account
    utilities.set_ownership_of_file('/etc/dynamite/', user='******', group='dynamite')
    utilities.set_ownership_of_file('/opt/dynamite/', user='******', group='dynamite')
    utilities.set_ownership_of_file('/var/log/dynamite', user='******', group='dynamite')
def optimize(self) -> None:
    """Runs Kibana webpack optimizer among other things.

    Returns:
        None
    """
    environ = utilities.get_environment_file_dict()
    if not os.path.exists(PID_DIRECTORY):
        utilities.makedirs(PID_DIRECTORY)
        utilities.set_ownership_of_file(PID_DIRECTORY, user='******', group='dynamite')
    self.logger.info('Optimizing Kibana Libraries.')
    # Kibana initially has to be called as root due to a process forking issue when using runuser
    # builtin; its output is suppressed via PIPE
    subprocess.call('{}/bin/kibana --optimize --allow-root'.format(
        environ['KIBANA_HOME'],
    ), shell=True, env=utilities.get_environment_file_dict(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    # Pass permissions back to dynamite user
    utilities.set_ownership_of_file(environ['KIBANA_LOGS'], user='******', group='dynamite')
    utilities.set_ownership_of_file(environ['KIBANA_HOME'], user='******', group='dynamite')
def setup_kibana(self):
    """
    Create required directories, files, and variables to run Kibana successfully;
    """
    pacman = OSPackageManager(verbose=self.verbose)
    pacman.refresh_package_indexes()
    # NOTE(review): 'curl' is installed up-front; presumably required by a later step — confirm.
    pacman.install_packages(['curl'])
    self._create_kibana_directories()
    self._copy_kibana_files_and_directories()
    self._create_kibana_environment_variables()
    self._setup_default_kibana_configs()
    self._install_kibana_objects()
    # Hand the Dynamite trees over to the service account
    utilities.set_ownership_of_file('/etc/dynamite/', user='******', group='dynamite')
    utilities.set_ownership_of_file('/opt/dynamite/', user='******', group='dynamite')
    utilities.set_ownership_of_file('/var/log/dynamite', user='******', group='dynamite')
def setup_elasticsearch(self, stdout=False):
    """
    Create required directories, files, and variables to run ElasticSearch successfully;
    Setup Java environment

    :param stdout: Print output to console
    """
    self._create_elasticsearch_directories(stdout=stdout)
    self._copy_elasticsearch_files_and_directories(stdout=stdout)
    self._create_elasticsearch_environment_variables(stdout=stdout)
    self._setup_default_elasticsearch_configs(stdout=stdout)
    self._update_sysctl(stdout=stdout)
    # NOTE(review): ownership is set without explicit user/group here, unlike sibling
    # installers that pass user/group explicitly — confirm the defaults match.
    utilities.set_ownership_of_file('/etc/dynamite/')
    utilities.set_ownership_of_file('/opt/dynamite/')
    utilities.set_ownership_of_file('/var/log/dynamite')
    self.setup_passwords(stdout=stdout)
def setup_kibana(self, stdout=False):
    """
    Create required directories, files, and variables to run Kibana successfully;

    :param stdout: Print output to console
    """
    pacman = OSPackageManager()
    pacman.refresh_package_indexes()
    # NOTE(review): 'curl' is installed up-front; presumably required by a later step — confirm.
    pacman.install_packages(['curl'])
    self._create_kibana_directories(stdout=stdout)
    self._copy_kibana_files_and_directories(stdout=stdout)
    self._create_kibana_environment_variables(stdout=stdout)
    self._setup_default_kibana_configs(stdout=stdout)
    self._install_kibana_objects(stdout=stdout)
    # NOTE(review): ownership is set without explicit user/group here, unlike sibling
    # installers that pass user/group explicitly — confirm the defaults match.
    utilities.set_ownership_of_file('/etc/dynamite/')
    utilities.set_ownership_of_file('/opt/dynamite/')
    utilities.set_ownership_of_file('/var/log/dynamite')
def setup_elasticsearch(self):
    """
    Create required directories, files, and variables to run ElasticSearch successfully;
    Setup Java environment

    Raises:
        elastic_exceptions.InstallElasticsearchError: If permissions cannot be set, systemctl
            is unavailable, or the systemd service cannot be installed.
    """
    self._create_elasticsearch_directories()
    self._copy_elasticsearch_files_and_directories()
    self._create_elasticsearch_environment_variables()
    self._setup_default_elasticsearch_configs()
    self._update_sysctl()
    try:
        # Hand the install/config/log trees over to the service account
        utilities.set_ownership_of_file(self.configuration_directory, user='******', group='dynamite')
        utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
        utilities.set_ownership_of_file(self.log_directory, user='******', group='dynamite')
    except Exception as e:
        self.logger.error(
            "General error occurred while attempting to set permissions on root directories."
        )
        self.logger.debug(
            "General error occurred while attempting to set permissions on root directories; {}"
            .format(e))
        raise elastic_exceptions.InstallElasticsearchError(
            "General error occurred while attempting to set permissions on root directories; {}"
            .format(e))
    try:
        sysctl = systemctl.SystemCtl()
    except general_exceptions.CallProcessError:
        raise elastic_exceptions.InstallElasticsearchError(
            "Could not find systemctl.")
    self.logger.info("Installing ElasticSearch systemd Service.")
    if not sysctl.install_and_enable(
            os.path.join(const.DEFAULT_CONFIGS, 'systemd', 'elasticsearch.service')):
        raise elastic_exceptions.InstallElasticsearchError(
            "Failed to install ElasticSearch systemd service.")
    self.setup_passwords()
def setup_elasticsearch(self):
    """
    Create required directories, files, and variables to run ElasticSearch successfully;
    Setup Java environment
    """
    self._create_elasticsearch_directories()
    self._copy_elasticsearch_files_and_directories()
    self._create_elasticsearch_environment_variables()
    self._setup_default_elasticsearch_configs()
    self._update_sysctl()
    # Hand every Dynamite tree over to the service account
    for dynamite_path in ('/etc/dynamite/', '/opt/dynamite/', '/var/log/dynamite'):
        utilities.set_ownership_of_file(dynamite_path, user='******', group='dynamite')
    self.setup_passwords()
def setup_kibana(self):
    """
    Create required directories, files, and variables to run Kibana successfully;

    Raises:
        kibana_exceptions.InstallKibanaError: If OS packages cannot be installed, permissions
            cannot be set, systemctl is unavailable, or the systemd service cannot be installed.
    """
    try:
        pacman = package_manager.OSPackageManager(stdout=self.stdout, verbose=self.verbose)
    except general_exceptions.InvalidOsPackageManagerDetectedError:
        self.logger.error("No valid OS package manager detected.")
        raise kibana_exceptions.InstallKibanaError("No valid OS package manager detected.")
    try:
        # NOTE(review): 'curl' is installed up-front; presumably required by a later step — confirm.
        pacman.refresh_package_indexes()
        pacman.install_packages(['curl'])
    except (general_exceptions.OsPackageManagerInstallError, general_exceptions.OsPackageManagerRefreshError):
        self.logger.error("Failed to install one or more packages; {}".format(["curl"]))
        raise kibana_exceptions.InstallKibanaError("Failed to install one or more packages; {}".format(["curl"]))
    self._create_kibana_directories()
    self._copy_kibana_files_and_directories()
    self._create_kibana_environment_variables()
    self._setup_default_kibana_configs()
    try:
        # Hand the install/config/log trees over to the service account
        utilities.set_ownership_of_file(self.configuration_directory, user='******', group='dynamite')
        utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
        utilities.set_ownership_of_file(self.log_directory, user='******', group='dynamite')
    except Exception as e:
        self.logger.error("General error occurred while attempting to set permissions on root directories.")
        self.logger.debug(
            "General error occurred while attempting to set permissions on root directories; {}".format(e))
        raise kibana_exceptions.InstallKibanaError(
            "General error occurred while attempting to set permissions on root directories; {}".format(e))
    try:
        sysctl = systemctl.SystemCtl()
    except general_exceptions.CallProcessError:
        raise kibana_exceptions.InstallKibanaError("Could not find systemctl.")
    self.logger.info("Installing Kibana systemd Service.")
    if not sysctl.install_and_enable(os.path.join(const.DEFAULT_CONFIGS, 'systemd', 'kibana.service')):
        raise kibana_exceptions.InstallKibanaError("Failed to install Kibana systemd service.")
    self._install_kibana_objects()
def setup(self, node_name: Optional[str] = None, host: Optional[str] = None,
          elasticsearch_host: Optional[str] = None, elasticsearch_port: Optional[int] = None,
          pipeline_batch_size: Optional[int] = None, pipeline_batch_delay: Optional[int] = None,
          heap_size_gigs: Optional[int] = None):
    """Setup Logstash: patch the OS, install files, and apply an optimized default configuration.

    Args:
        node_name: Name of this Logstash node (defaults to a name derived from the default ES node name)
        host: The IP or hostname to listen on (defaults to the primary IP address)
        elasticsearch_host: The Elasticsearch host to connect to (defaults to the primary IP address)
        elasticsearch_port: The Elasticsearch port (defaults to 9200)
        pipeline_batch_size: Batch size per pipeline worker (defaults to 125)
        pipeline_batch_delay: Batch delay (defaults to 50) — units presumably ms; TODO confirm
        heap_size_gigs: JVM heap size in gigabytes (defaults to half of available memory)
    Returns:
        None
    """
    sysctl = systemctl.SystemCtl()
    # System patching and directory setup
    self.logger.debug('Patching sysctl.')
    utilities.update_sysctl()
    self.logger.debug('Patching file-handle limits.')
    utilities.update_user_file_handle_limits()
    utilities.makedirs(self.configuration_directory)
    utilities.makedirs(self.install_directory)
    utilities.makedirs(self.log_directory)
    # NOTE(review): "fills" looks like a typo for "files" in this helper's name —
    # it is defined elsewhere, so it cannot be renamed here.
    self.copy_logstash_fills_and_directories()
    self.create_update_logstash_environment_variables()
    # Overwrite with dynamite default configurations
    self.copy_file_or_directory_to_destination(f'{const.DEFAULT_CONFIGS}/logstash/logstash.yml',
                                               self.configuration_directory)
    self.copy_file_or_directory_to_destination(f'{const.DEFAULT_CONFIGS}/logstash/jvm.options',
                                               self.configuration_directory)
    # Optimize Configurations
    ls_main_config = config.ConfigManager(self.configuration_directory)
    ls_java_config = config.JavaHeapOptionsConfigManager(self.configuration_directory)
    ls_main_config.path_logs = self.log_directory
    if not node_name:
        node_name = utilities.get_default_es_node_name().replace('es_node', 'ls_node')
    if not host:
        host = utilities.get_primary_ip_address()
    if not elasticsearch_host:
        elasticsearch_host = utilities.get_primary_ip_address()
    if not elasticsearch_port:
        elasticsearch_port = 9200
    if not pipeline_batch_size:
        pipeline_batch_size = 125
    if not pipeline_batch_delay:
        pipeline_batch_delay = 50
    if not heap_size_gigs:
        # Default heap: half the available memory, in whole gigabytes
        heap_size_gigs = int((utilities.get_memory_available_bytes() / 10 ** 9) / 2)
    self.logger.debug(f'Logstash will connect to Elasticsearch on {elasticsearch_host}:{elasticsearch_port}')
    ls_main_config.node_name = node_name
    ls_main_config.host = host
    ls_main_config.pipeline_batch_size = pipeline_batch_size
    ls_main_config.pipeline_batch_delay = pipeline_batch_delay
    self.create_update_env_variable('LS_ES_HOST', elasticsearch_host)
    self.create_update_env_variable('LS_ES_PORT', elasticsearch_port)
    # Initial and maximum heap are pinned to the same value
    ls_java_config.initial_memory = f'{heap_size_gigs}g'
    ls_java_config.maximum_memory = f'{heap_size_gigs}g'
    self.logger.debug(f'Java Heap Initial & Max Memory = {heap_size_gigs} GB')
    ls_main_config.commit()
    ls_java_config.commit()
    self.logger.info('Applying configuration.')
    # Fix Permissions
    self.logger.info('Setting up file permissions.')
    utilities.set_ownership_of_file(self.configuration_directory, user='******', group='dynamite')
    utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
    utilities.set_ownership_of_file(self.log_directory, user='******', group='dynamite')
    # Install and enable service
    self.logger.info(f'Installing service -> {const.DEFAULT_CONFIGS}/systemd/logstash.service')
    sysctl.install_and_enable(f'{const.DEFAULT_CONFIGS}/systemd/logstash.service')
def setup(self, host: Optional[str] = None, port: Optional[int] = None,
          elasticsearch_targets: Optional[List[str]] = None) -> None:
    """Setup Kibana

    Args:
        host: The IP or hostname to listen on (defaults to the primary IP address)
        port: The port to listen on (defaults to 5601)
        elasticsearch_targets: A list of Elasticsearch urls
    Returns:
        None
    """
    sysctl = systemctl.SystemCtl()
    # Directory setup
    self.logger.debug(
        f'Creating directory: {self.configuration_directory}')
    utilities.makedirs(self.configuration_directory)
    self.logger.debug(f'Creating directory: {self.install_directory}')
    utilities.makedirs(self.install_directory)
    self.logger.debug(f'Creating directory: {self.log_directory}')
    utilities.makedirs(self.log_directory)
    self.copy_kibana_files_and_directories()
    self.create_update_kibana_environment_variables()
    self.copy_file_or_directory_to_destination(
        f'{const.DEFAULT_CONFIGS}/kibana/kibana.yml',
        self.configuration_directory)
    # Optimize Configurations
    kb_main_config = config.ConfigManager(self.configuration_directory)
    if not host:
        host = utilities.get_primary_ip_address()
    if not port:
        port = 5601
    if not elasticsearch_targets:
        # Default to a local HTTPS Elasticsearch on the standard port
        elasticsearch_targets = [
            f'https://{utilities.get_primary_ip_address()}:9200'
        ]
    self.logger.debug(f'Elasticsearch Targets = {elasticsearch_targets}')
    kb_main_config.host = host
    kb_main_config.port = port
    self.logger.debug(
        f'Kibana will listen on {kb_main_config.host}:{kb_main_config.port}'
    )
    kb_main_config.elasticsearch_targets = elasticsearch_targets
    self.logger.info('Applying configuration.')
    kb_main_config.commit()
    # Fix Permissions
    utilities.set_ownership_of_file(self.configuration_directory, user='******', group='dynamite')
    utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
    utilities.set_ownership_of_file(self.log_directory, user='******', group='dynamite')
    # Install and enable service
    self.logger.info(
        f'Installing service -> {const.DEFAULT_CONFIGS}/systemd/kibana.service'
    )
    sysctl.install_and_enable(
        f'{const.DEFAULT_CONFIGS}/systemd/kibana.service')
    # Install the default dashboard package via the Kibana API
    self.logger.info('Installing "BaseViews" Kibana package')
    task = install_dynamite_base_views.InstallKibanaDynamiteBaseViewsPackage(
        username='******',
        password='******',
        target=f"http://{host}:{port}")
    task.download_and_install()
def setup_logstash_elastiflow(self):
    """Install the ElastiFlow LogStash configurations.

    Creates the ElastiFlow installation directory, copies the default
    configurations into it, and registers the ElastiFlow path variables in
    the Dynamite environment file when they are not already present.

    Raises:
        elastiflow_exceptions.InstallElastiflowError: If the environment
            file cannot be read/updated or the ElastiFlow configuration
            cannot be read/written.
    """
    env_file = os.path.join(const.CONFIG_PATH, 'environment')
    self.logger.info(
        'Creating ElastiFlow installation and configuration directories.')
    utilities.makedirs(self.install_directory, exist_ok=True)
    self.logger.info('Copying ElastiFlow configurations.')
    # NOTE(review): configurations are copied from the 'logstash/zeek'
    # defaults directory rather than an 'elastiflow' one — confirm this
    # is intentional.
    utilities.copytree(
        os.path.join(const.DEFAULT_CONFIGS, 'logstash', 'zeek'),
        self.install_directory)
    utilities.set_ownership_of_file(self.install_directory, user='******',
                                    group='dynamite')
    # (env var name, human-readable label for logging, sub-directory)
    elastiflow_env_paths = (
        ('ELASTIFLOW_DICT_PATH', 'dictionary', 'dictionaries'),
        ('ELASTIFLOW_TEMPLATE_PATH', 'template', 'templates'),
        ('ELASTIFLOW_GEOIP_DB_PATH', 'GeoDBs', 'geoipdbs'),
        ('ELASTIFLOW_DEFINITION_PATH', 'definitions', 'definitions'),
    )
    try:
        with open(env_file) as env_f:
            env_str = env_f.read()
        for var_name, label, sub_directory in elastiflow_env_paths:
            if var_name in env_str:
                continue
            path = os.path.join(self.install_directory, sub_directory)
            self.logger.info(
                'Updating ElastiFlow {} configuration path [{}]'.format(
                    label, path))
            # Append directly instead of shelling out to `echo ... >>`.
            # The previous shell invocation stripped the double quotes
            # before writing, so the resulting line is identical, and a
            # direct append avoids the shell entirely (no injection risk
            # from unusual install paths).
            with open(env_file, 'a') as env_f:
                env_f.write('{}={}\n'.format(var_name, path))
    except Exception as e:
        self.logger.error(
            'Failed to read ElastiFlow environment variables.')
        self.logger.debug(
            "Failed to read ElastiFlow environment variables; {}".format(
                e))
        raise elastiflow_exceptions.InstallElastiflowError(
            "Failed to read ElastiFlow environment variables; {}".format(
                e))
    try:
        elastiflow_config.ConfigManager().write_environment_variables()
    except (elastiflow_exceptions.ReadElastiflowConfigError,
            elastiflow_exceptions.WriteElastiflowConfigError):
        self.logger.error(
            'Failed to read/write ElastiFlow environment variables.')
        raise elastiflow_exceptions.InstallElastiflowError(
            "Could not read/write ElastiFlow environment variables.")
def setup_passwords(self, stdout=False):
    """Bootstrap ElasticSearch's built-in user passwords.

    Creates a certificate keystore, starts ElasticSearch if it is not
    running, waits for the API, runs ``elasticsearch-setup-passwords auto``,
    and then resets every bootstrap password to ``self.password``.

    Args:
        stdout: If True, print progress messages to the console.

    Returns:
        True if all passwords were set successfully, False otherwise.
    """
    env_dict = utilities.get_environment_file_dict()

    def setup_from_bootstrap(s):
        # Parse "PASSWORD <user> = <password>" lines emitted by
        # elasticsearch-setup-passwords, then replace each bootstrap
        # password with the configured one.
        bootstrap_users_and_passwords = {}
        for line in s.split('\n'):
            if 'PASSWORD' in line:
                _, user, _, password = line.split(' ')
                if not isinstance(password, str):
                    password = password.decode()
                bootstrap_users_and_passwords[user] = password
        es_pass_config = ElasticPasswordConfigurator(
            auth_user='******',
            current_password=bootstrap_users_and_passwords['elastic'])
        return es_pass_config.set_all_passwords(
            new_password=self.password, stdout=True)

    if not ElasticProfiler().is_installed:
        sys.stderr.write(
            '[-] ElasticSearch must be installed and running to bootstrap passwords.\n'
        )
        return False
    sys.stdout.write('[+] Creating certificate keystore\n')
    # Create the directory directly instead of shelling out to `mkdir -p`.
    os.makedirs(os.path.join(self.configuration_directory, 'config'),
                exist_ok=True)
    es_cert_util = os.path.join(self.install_directory, 'bin',
                                'elasticsearch-certutil')
    es_cert_keystore = os.path.join(self.configuration_directory, 'config',
                                    'elastic-certificates.p12')
    cert_p = subprocess.Popen(
        [es_cert_util, 'cert', '-out', es_cert_keystore, '-pass', ''],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        stdin=subprocess.PIPE, env=env_dict)
    cert_p_res = cert_p.communicate()
    if not os.path.exists(es_cert_keystore):
        sys.stderr.write(
            '[-] Failed to setup SSL certificate keystore: \noutput: {}\n\t'
            .format(cert_p_res))
        return False
    # NOTE(review): other ownership calls in this file pass user/group
    # explicitly; this one relies on the utility's defaults — confirm.
    utilities.set_ownership_of_file(
        os.path.join(self.configuration_directory, 'config'))
    if not ElasticProfiler().is_running:
        ElasticProcess().start(stdout=stdout)
        sys.stdout.flush()
        while not ElasticProfiler().is_listening:
            if stdout:
                sys.stdout.write(
                    '[+] Waiting for ElasticSearch API to become accessible.\n'
                )
            time.sleep(5)
        if stdout:
            sys.stdout.write('[+] ElasticSearch API is up.\n')
            sys.stdout.write(
                '[+] Sleeping for 10 seconds, while ElasticSearch API finishes booting.\n'
            )
            sys.stdout.flush()
        # BUG FIX: the message above promised a 10-second grace period,
        # but no sleep was ever performed; bootstrap could race the
        # still-initializing cluster.
        time.sleep(10)
    sys.stdout.write('[+] Bootstrapping passwords.\n')
    es_password_util = os.path.join(self.install_directory, 'bin',
                                    'elasticsearch-setup-passwords')
    bootstrap_p = subprocess.Popen([es_password_util, 'auto'],
                                   cwd=self.configuration_directory,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   stdin=subprocess.PIPE,
                                   env=env_dict)
    bootstrap_p_res = bootstrap_p.communicate(input=b'y\n')
    # BUG FIX: communicate() always returns a (stdout, stderr) tuple, so
    # the previous `if not bootstrap_p_res:` check could never detect
    # failure; check the process exit code instead.
    if bootstrap_p.returncode != 0:
        sys.stderr.write('[-] Failed to setup new passwords\n')
        return False
    output = bootstrap_p_res[0]
    if not isinstance(output, str):
        output = output.decode()
    return setup_from_bootstrap(output)
def setup_logstash(self):
    """
    Create required directories, files, and variables to run LogStash successfully;

    Orchestrates the full LogStash installation: directory layout, file
    copies, environment variables, default configs, sysctl tuning,
    ElastiFlow/Synesis sub-setups, plugin installation, pipeline config,
    ownership fixes, and finally the systemd service registration.

    Raises:
        logstash_exceptions.InstallLogstashError: On any failure along
            the way (file copy, directory creation, ownership change,
            missing systemctl, or service install failure).
    """
    self._create_logstash_directories()
    self._copy_logstash_files_and_directories()
    self._create_logstash_environment_variables()
    self._setup_default_logstash_configs()
    self._update_sysctl()
    self._setup_elastiflow()
    self._setup_synesis()
    self._install_logstash_plugins()
    try:
        # Install the multi-pipeline definition used by LogStash.
        shutil.copy(
            os.path.join(const.DEFAULT_CONFIGS, 'logstash',
                         'pipelines.yml'),
            os.path.join(self.configuration_directory, 'pipelines.yml'))
    except Exception as e:
        raise logstash_exceptions.InstallLogstashError(
            "General error while copying pipeline.yml file; {}".format(e))
    try:
        # Defensive re-creation of the root directories; presumably
        # redundant with _create_logstash_directories() above — TODO
        # confirm and deduplicate.
        utilities.makedirs(self.install_directory, exist_ok=True)
        utilities.makedirs(self.configuration_directory, exist_ok=True)
        utilities.makedirs(self.log_directory, exist_ok=True)
    except Exception as e:
        raise logstash_exceptions.InstallLogstashError(
            "General error occurred while attempting to create root directories; {}"
            .format(e))
    try:
        # Hand all root directories to the dynamite user/group.
        utilities.set_ownership_of_file(self.configuration_directory,
                                        user='******',
                                        group='dynamite')
        utilities.set_ownership_of_file(self.install_directory,
                                        user='******',
                                        group='dynamite')
        utilities.set_ownership_of_file(self.log_directory,
                                        user='******',
                                        group='dynamite')
    except Exception as e:
        self.logger.error(
            "General error occurred while attempting to set permissions on root directories."
        )
        self.logger.debug(
            "General error occurred while attempting to set permissions on root directories; {}"
            .format(e))
        raise logstash_exceptions.InstallLogstashError(
            "General error occurred while attempting to set permissions on root directories; {}"
            .format(e))
    try:
        sysctl = systemctl.SystemCtl()
    except general_exceptions.CallProcessError:
        raise logstash_exceptions.InstallLogstashError(
            "Could not find systemctl.")
    self.logger.info("Installing LogStash systemd Service.")
    # install_and_enable returns a falsy value on failure.
    if not sysctl.install_and_enable(
            os.path.join(const.DEFAULT_CONFIGS, 'systemd',
                         'logstash.service')):
        raise logstash_exceptions.InstallLogstashError(
            "Failed to install LogStash systemd service.")
def setup_dynamite_sdk(self):
    """
    Sets up sdk files; and installs globally

    Copies the DynamiteSDK notebooks and example configuration into place,
    registers NOTEBOOK_HOME and DYNAMITE_LAB_CONFIG in the Dynamite
    environment file, installs the SDK package with setuptools, and writes
    the ElasticSearch connection settings into the SDK config.
    """
    env_file = '/etc/dynamite/environment'
    if self.stdout:
        sys.stdout.write('[+] Copying DynamiteSDK into lab environment.\n')
        sys.stdout.flush()
    # Create directories directly instead of shelling out to `mkdir -p`.
    os.makedirs(self.notebook_home, exist_ok=True)
    os.makedirs(self.configuration_directory, exist_ok=True)
    # Read the environment file once, with the handle properly closed
    # (the old code leaked two open file objects).
    with open(env_file) as env_f:
        env_str = env_f.read()
    if 'NOTEBOOK_HOME' not in env_str:
        if self.stdout:
            sys.stdout.write(
                '[+] Updating Notebook home path [{}]\n'.format(
                    self.notebook_home))
        # Append directly instead of `echo VAR="..." >> file` through a
        # shell; the shell stripped the double quotes before writing, so
        # the resulting line is identical, without the injection risk.
        with open(env_file, 'a') as env_f:
            env_f.write('NOTEBOOK_HOME={}\n'.format(self.notebook_home))
    if 'DYNAMITE_LAB_CONFIG' not in env_str:
        if self.stdout:
            sys.stdout.write(
                '[+] Updating Dynamite Lab Config path [{}]\n'.format(
                    self.configuration_directory))
        with open(env_file, 'a') as env_f:
            env_f.write('DYNAMITE_LAB_CONFIG={}\n'.format(
                self.configuration_directory))
    sdk_install_cache = os.path.join(const.INSTALL_CACHE,
                                     const.DYNAMITE_SDK_DIRECTORY_NAME)
    utilities.copytree(os.path.join(sdk_install_cache, 'notebooks'),
                       self.notebook_home)
    shutil.copy(
        os.path.join(sdk_install_cache, 'dynamite_sdk',
                     'config.cfg.example'),
        os.path.join(self.configuration_directory, 'config.cfg'))
    utilities.set_ownership_of_file(self.notebook_home,
                                    user='******',
                                    group='jupyter')
    if self.stdout:
        sys.stdout.write(
            '[+] Installing dynamite-sdk-lite (https://github.com/DynamiteAI/dynamite-sdk-lite)\n'
        )
        sys.stdout.write(
            '[+] Depending on your distribution it may take some time to install all requirements.\n'
        )
        sys.stdout.flush()
    # Suppress the installer's output unless verbose mode was requested.
    if self.verbose:
        p = subprocess.Popen(['python3', 'setup.py', 'install'],
                             cwd=sdk_install_cache)
    else:
        p = subprocess.Popen(['python3', 'setup.py', 'install'],
                             cwd=sdk_install_cache,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    p.communicate()
    # Point the SDK at the local ElasticSearch instance.
    dynamite_sdk_config = DynamiteLabConfigurator(
        configuration_directory=self.configuration_directory)
    dynamite_sdk_config.elasticsearch_url = 'http://{}:{}'.format(
        self.elasticsearch_host, self.elasticsearch_port)
    dynamite_sdk_config.elasticsearch_user = '******'
    dynamite_sdk_config.elasticsearch_password = self.elasticsearch_password
    dynamite_sdk_config.write_config()
def setup(self, targets: List[str], target_type: Optional[str] = 'elasticsearch',
          monitor_log_paths: Optional[List[str]] = None, agent_tag: Optional[str] = None) -> None:
    """Setup Filebeat

    Args:
        targets: A list of Elasticsearch/Kafka/Logstash targets to forward events to
            (E.G ["192.168.0.9 5044", ...])
        target_type: The target type; current supported: elasticsearch (default), logstash, kafka, redis
        monitor_log_paths: A tuple of log paths to monitor
        agent_tag: A friendly name for the agent (defaults to the hostname with no spaces and _agt suffix)

    Returns:
        None
    """
    # Imported lazily to avoid a circular import between service modules.
    from dynamite_nsm.services.zeek import profile as zeek_profile
    from dynamite_nsm.services.suricata import profile as suricata_profile
    sysctl = systemctl.SystemCtl()
    zeek_log_root, suricata_log_root = None, None
    # Directory setup
    self.logger.debug(f'Creating directory: {self.install_directory}')
    utilities.makedirs(self.install_directory)
    utilities.makedirs(f'{self.install_directory}/logs')
    self.logger.info('Installing files and directories.')
    self.copy_filebeat_files_and_directories()
    # Overwrite with dynamite default configurations
    self.copy_file_or_directory_to_destination(
        f'{const.DEFAULT_CONFIGS}/filebeat/filebeat.yml',
        self.install_directory)
    self.copy_file_or_directory_to_destination(
        f'{const.DEFAULT_CONFIGS}/filebeat/module/', self.install_directory)
    self.copy_file_or_directory_to_destination(
        f'{const.DEFAULT_CONFIGS}/filebeat/modules.d/',
        self.install_directory)
    filebeat_config = config.ConfigManager(self.install_directory,
                                           verbose=self.verbose,
                                           stdout=self.stdout)
    # Point Filebeat at the requested downstream collector.
    if target_type == 'elasticsearch':
        filebeat_config.switch_to_elasticsearch_target()
        filebeat_config.elasticsearch_targets.target_strings = targets
        self.logger.info(f'Enabling Elasticsearch connector: '
                         f'{filebeat_config.elasticsearch_targets.target_strings}')
    elif target_type == 'logstash':
        filebeat_config.switch_to_logstash_target()
        filebeat_config.logstash_targets.target_strings = targets
    elif target_type == 'kafka':
        filebeat_config.switch_to_kafka_target()
        filebeat_config.kafka_targets.target_strings = targets
    elif target_type == 'redis':
        filebeat_config.switch_to_redis_target()
        filebeat_config.redis_targets.target_strings = targets
    filebeat_config.input_logs = misc_filebeat_objs.InputLogs(
        monitor_log_paths=[]
    )
    filebeat_config.field_processors.originating_agent_tag = agent_tag
    if not monitor_log_paths:
        # No explicit paths given: auto-discover Zeek/Suricata log
        # locations from the Dynamite environment file.
        environ = utilities.get_environment_file_dict()
        zeek_log_root = f'{environ.get("ZEEK_HOME", "")}/logs/current/'
        suricata_log_root = environ.get('SURICATA_LOGS', '')
        zeek_profiler = zeek_profile.ProcessProfiler()
        suricata_profiler = suricata_profile.ProcessProfiler()
        if zeek_profiler.is_installed():
            self.logger.info(f'Zeek installation found; monitoring: {zeek_log_root}*.log')
            filebeat_config.input_logs.monitor_log_paths.append(f'{zeek_log_root}*.log')
        if suricata_profiler.is_installed():
            self.logger.info(f'Suricata installation found; monitoring: {suricata_log_root}/eve.json')
            filebeat_config.input_logs.monitor_log_paths.append(f'{suricata_log_root}/eve.json')
    else:
        filebeat_config.input_logs = misc_filebeat_objs.InputLogs(
            monitor_log_paths=monitor_log_paths
        )
    self.logger.info(f'Monitoring Paths = {filebeat_config.input_logs.monitor_log_paths}')
    if not agent_tag:
        filebeat_config.field_processors.originating_agent_tag = utilities.get_default_agent_tag()
    self.logger.info(f'Agent Tag = {filebeat_config.field_processors.originating_agent_tag}')
    self.logger.debug(filebeat_config.elasticsearch_targets.get_raw())
    # BUG FIX: 'Applying configuration.' was previously logged *after*
    # commit(); log before acting, consistent with the other installers.
    self.logger.info('Applying configuration.')
    filebeat_config.commit()
    self.logger.info('Installing modules.')
    filebeat_config.patch_modules(zeek_log_directory=zeek_log_root,
                                  suricata_log_directory=suricata_log_root)
    # Setting up permissions
    self.logger.info('Setting up file permissions.')
    config_file = f'{self.install_directory}/filebeat.yml'
    utilities.set_ownership_of_file(self.install_directory, user='******', group='dynamite')
    # NOTE(review): 'go-w' is a symbolic mode string passed to a parameter
    # named unix_permissions_integer — confirm the utility accepts it.
    utilities.set_permissions_of_file(f'{self.install_directory}/modules.d/', unix_permissions_integer='go-w')
    utilities.set_permissions_of_file(f'{self.install_directory}/module/', unix_permissions_integer='go-w')
    utilities.set_ownership_of_file(config_file, user='******', group='dynamite')
    utilities.set_permissions_of_file(config_file, unix_permissions_integer=644)
    filebeat_config.enable_ecs_normalization()
    # Install and enable service
    self.logger.info(f'Installing service -> {const.DEFAULT_CONFIGS}/systemd/filebeat.service')
    sysctl.install_and_enable(f'{const.DEFAULT_CONFIGS}/systemd/filebeat.service')
    # Update environment file
    self.create_update_filebeat_environment_variables()