def reload_new_conf(report_value, reg1, reg2):
    """Apply a new ossec configuration with a changed report_value and restart Wazuh.

    Builds the configuration parameters, loads the third configuration template
    from the YAML file, restarts Wazuh with it and waits for the initial FIM
    scan to finish. (The old docstring claimed the new configuration was
    *returned*; it is applied in place and nothing is returned. It also opened
    with a malformed `\"\"\"\"` quadruple quote.)

    Parameters
    ----------
    report_value : str
        Value that will be used for the report_changes option.
    reg1 : str
        Registry path that will be written in the configuration for WINDOWS_REGISTRY_1.
    reg2 : str
        Registry path that will be written in the configuration for WINDOWS_REGISTRY_2.
    """
    new_conf_params = {'WINDOWS_REGISTRY_1': reg1,
                       'WINDOWS_REGISTRY_2': reg2,
                       'REPORT_CHANGES_1': report_value,
                       'REPORT_CHANGES_2': report_value}
    conf_params, conf_metadata = generate_params(extra_params=new_conf_params, modes=['scheduled'])
    new_conf = load_wazuh_configurations(configurations_path, __name__,
                                         params=conf_params, metadata=conf_metadata)

    # Load the third configuration in the yaml
    restart_wazuh_with_new_conf(set_section_wazuh_conf(new_conf[2].get('sections')))

    # Wait for FIM scan to finish
    detect_initial_scan(wazuh_log_monitor)
def test_audit_key(audit_key, path, get_configuration, configure_environment, restart_syscheckd):
    """Check <audit_key> functionality by adding an audit rule and checking if alerts with
    that key are triggered when a file is created.

    This test is intended to be used with valid configurations.

    Parameters
    ----------
    audit_key : str
        Name of the audit_key to monitor.
    path : str
        Path of the folder to be monitored.
    """
    check_apply_test({audit_key}, get_configuration['tags'])

    # Add watch rule (keep the command string so it can be shown on failure)
    add_rule_command = "auditctl -w " + path + " -p wa -k " + audit_key
    os.system(add_rule_command)

    # Restart Wazuh and wait for the initial FIM scan
    truncate_file(LOG_FILE_PATH)
    control_service('restart')
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
    detect_initial_scan(wazuh_log_monitor)

    # Look for audit_key word. An explicit error_message is added (consistent
    # with the sibling test below) so a timeout reports a useful diagnostic.
    create_file(REGULAR, path, "testfile")
    events = wazuh_log_monitor.start(timeout=30, callback=callback_audit_key, accum_results=1,
                                     error_message=f'Did not receive expected "Match audit_key ..." event '
                                                   f'with the command {add_rule_command}').result()
    assert audit_key in events

    # Remove watch rule
    os.system("auditctl -W " + path + " -p wa -k " + audit_key)
def test_new_directory(tags_to_apply, get_configuration, configure_environment, restart_syscheckd,
                       wait_for_initial_scan):
    """Verify that a newly created monitored directory generates events after the next scheduled scan.

    Steps performed:
    - Monitor a directory that does not exist yet.
    - Create it with files inside and check that this raises no events in ossec.log.
    - Jump in time to the next scheduled scan.
    - Check that file operations inside the directory now do raise events.

    Parameters
    ----------
    tags_to_apply : set
        Run test if matches with a configuration identifier, skip otherwise.
    """
    check_apply_test(tags_to_apply, get_configuration['tags'])

    if sys.platform == 'win32':
        os.makedirs(directory_str, exist_ok=True, mode=0o777)
        time.sleep(1)
    else:
        # Populate the monitored directory; no events may be raised at this point
        regular_file_cud(directory_str, wazuh_log_monitor, file_list=['file1', 'file2', 'file3'],
                         min_timeout=global_parameters.default_timeout, triggers_event=False)
        # Move to the next scheduled scan and wait for it to finish
        check_time_travel(True)
        detect_initial_scan(wazuh_log_monitor)

    # After the scan, CUD actions on new files must raise events
    regular_file_cud(directory_str, wazuh_log_monitor, file_list=['file4', 'file5', 'file6'],
                     min_timeout=global_parameters.default_timeout, triggers_event=True)
def wait_for_fim_start_sync_disabled(request):
    """Wait for the end of the initial FIM scan.

    If detect_realtime_start is used, the synchronization event is skipped
    and the test fails.
    """
    # The log monitor is stored on the test module by a previous fixture.
    file_monitor = getattr(request.module, 'wazuh_log_monitor')
    detect_initial_scan(file_monitor)
def test_skip_proc(get_configuration, configure_environment, restart_syscheckd, wait_for_initial_scan):
    """Check if syscheckd skips /proc when setting 'skip_proc="yes"'."""
    check_apply_test({'skip_proc'}, get_configuration['tags'])
    # When skip is 'no', events for /proc ARE expected; when 'yes', none must appear.
    trigger = get_configuration['metadata']['skip'] == 'no'

    if trigger:
        # Launch a helper process so there is a live /proc/<pid> entry to monitor.
        proc = subprocess.Popen([
            "python3",
            f"{os.path.dirname(os.path.abspath(__file__))}/data/proc.py"
        ])

        # Change configuration, monitoring the PID path in /proc
        # Monitor only /proc/PID to expect only these events. Otherwise, it will fail due to Timeouts since
        # integrity scans will take too long
        new_conf = change_conf(f'/proc/{proc.pid}')
        new_ossec_conf = []

        # Get new skip_proc configuration
        for conf in new_conf:
            if conf['metadata']['skip'] == 'no' and conf['tags'] == ['skip_proc']:
                new_ossec_conf = set_section_wazuh_conf(conf.get('sections'))
        restart_wazuh_with_new_conf(new_ossec_conf)
        truncate_file(LOG_FILE_PATH)
        proc_monitor = FileMonitor(LOG_FILE_PATH)
        detect_initial_scan(proc_monitor)

        # Do not expect any 'Sending event'
        with pytest.raises(TimeoutError):
            proc_monitor.start(timeout=3, callback=callback_detect_event,
                               error_message='Did not receive expected "Sending FIM event: ..." event')

        # Advance to the next scheduled scan, after which /proc events should appear.
        check_time_travel(time_travel=True, monitor=wazuh_log_monitor)

        # Consume events until one references the monitored /proc/<pid>/ path.
        found_event = False
        while not found_event:
            event = proc_monitor.start(timeout=5, callback=callback_detect_event,
                                       error_message='Did not receive expected '
                                                     '"Sending FIM event: ..." event').result()
            if f'/proc/{proc.pid}/' in event['data'].get('path'):
                found_event = True

        # Kill the process
        subprocess.Popen(["kill", "-9", str(proc.pid)])
    else:
        # skip_proc is enabled: no integrity state event may arrive. If one does,
        # the AttributeError (instead of TimeoutError) makes pytest.raises fail.
        with pytest.raises(TimeoutError):
            event = wazuh_log_monitor.start(timeout=3, callback=callback_detect_integrity_state)
            raise AttributeError(f'Unexpected event {event}')
def restart_syscheckd_each_time(request):
    """Restart ossec-syscheckd with a clean log, ensure the test directory exists and wait for the scan.

    The fresh FileMonitor is published on the requesting test module as 'wazuh_log_monitor'.
    """
    control_service('stop', daemon='ossec-syscheckd')
    truncate_file(LOG_FILE_PATH)

    monitor = FileMonitor(LOG_FILE_PATH)
    setattr(request.module, 'wazuh_log_monitor', monitor)

    if not os.path.exists(testdir):
        os.mkdir(testdir)

    control_service('start', daemon='ossec-syscheckd')
    detect_initial_scan(monitor)
def detect_fim_scan(file_monitor):
    """Detect the initial FIM scan when restarting Wazuh.

    Parameters
    ----------
    file_monitor : FileMonitor
        File log monitor to detect events.
    """
    detect_initial_scan(file_monitor)
    if sys.platform != 'win32':
        return
    # On Windows, allow a short grace period after the scan log appears.
    time.sleep(5)
def test_multiple_dirs(dir_list, tags_to_apply, get_configuration, configure_environment, restart_syscheckd):
    """Check if syscheck can detect every event when adding, modifying and deleting a file within
    multiple monitored directories.

    Check that the maximum number of monitored directories are processed correctly, generating a
    warning, and discarding the excess. These directories will be added in one single entry like so:
        <directories>testdir0, testdir1, ..., testdirn</directories>

    Parameters
    ----------
    dir_list : list
        List with all the directories to be monitored.
    """
    check_apply_test(tags_to_apply, get_configuration['tags'])

    discarded = wait_for_event()
    # Bug fix: the old message interpolated the ACTUAL value where it claimed to
    # show the expected one; report both so a failure is actually diagnosable.
    assert discarded == expected_discarded, (f'Directories discarded expected to be: {expected_discarded}, '
                                             f'got: {discarded}')

    fim_mode = get_configuration['metadata']['fim_mode']
    if fim_mode == 'realtime':
        detect_realtime_start(wazuh_log_monitor)
    elif fim_mode == 'whodata':
        detect_whodata_start(wazuh_log_monitor)
    else:  # scheduled
        detect_initial_scan(wazuh_log_monitor)

    file = 'regular'
    scheduled = fim_mode == 'scheduled'
    whodata = fim_mode == 'whodata'

    try:
        multiple_dirs_test(mode="dirs", dir_list=dir_list, file=file, scheduled=scheduled,
                           whodata=whodata, log_monitor=wazuh_log_monitor,
                           timeout=2 * global_parameters.default_timeout)
    except TimeoutError as e:
        if whodata:
            # Known upstream whodata issue; do not fail the whole suite.
            pytest.xfail(reason='Xfailed due to issue: https://github.com/wazuh/wazuh/issues/4731')
        else:
            raise e
def check_when_no_report_changes(name, directory, fim_mode, new_conf):
    """Restart Wazuh without report_changes and verify the diff file is removed.

    :param name: File name
    :param directory: File directory
    :param fim_mode: FIM mode (scheduled, realtime, whodata)
    :param new_conf: New configuration to apply to syscheck
    :return: None
    """
    diff_file = create_and_check_diff(name, directory, fim_mode)

    restart_wazuh_with_new_conf(new_conf)
    # Wait for FIM scan to finish before checking that the diff file is gone
    detect_initial_scan(wazuh_log_monitor)

    assert not os.path.exists(diff_file), f'{diff_file} exists'
def wait_for_fim_start(get_configuration, request):
    """Wait for realtime start, whodata start or end of initial FIM scan."""
    file_monitor = getattr(request.module, 'wazuh_log_monitor')
    metadata = get_configuration['metadata']
    # A second-mode key ('fim_mode2') takes precedence when present.
    mode_key = 'fim_mode2' if 'fim_mode2' in metadata else 'fim_mode'

    detectors = {'realtime': detect_realtime_start,
                 'whodata': detect_whodata_start}
    try:
        mode = metadata[mode_key]
    except KeyError:
        mode = None  # no mode configured -> fall back to the scheduled detector
    detectors.get(mode, detect_initial_scan)(file_monitor)
def test_audit_key(audit_key, path, get_configuration, configure_environment, restart_syscheckd):
    """Check `<audit_key>` functionality by adding an audit rule and checking if alerts with that key
    are triggered when a file is created.

    Args:
        audit_key (str): Name of the audit_key to monitor.
        path (str): Path of the folder to be monitored.
        get_configuration (fixture): Gets the current configuration of the test.
        configure_environment (fixture): Configure the environment for the execution of the test.
        restart_syscheckd (fixture): Restarts syscheck.

    Raises:
        TimeoutError: If an expected event couldn't be captured.
        ValueError: If the path of the event is wrong.
    """
    logger.info('Applying the test configuration')
    check_apply_test({audit_key}, get_configuration['tags'])

    # Add watch rule
    add_rule_command = "auditctl -w " + path + " -p wa -k " + audit_key
    os.system(add_rule_command)

    # Restart Wazuh and wait for the initial FIM scan
    control_service('stop')
    truncate_file(fim.LOG_FILE_PATH)
    wazuh_log_monitor = FileMonitor(fim.LOG_FILE_PATH)
    control_service('start')
    fim.detect_initial_scan(wazuh_log_monitor)

    # Look for audit_key word
    fim.create_file(fim.REGULAR, path, "testfile")
    # Bug fix: `" ".join(add_rule_command)` joined the *characters* of the command
    # string with spaces, producing a garbled error message. Use the string as-is.
    events = wazuh_log_monitor.start(timeout=30, callback=fim.callback_audit_key, accum_results=1,
                                     error_message=f'Did not receive expected "Match audit_key ..." event '
                                                   f'with the command {add_rule_command}').result()
    assert audit_key in events

    # Remove watch rule
    os.system("auditctl -W " + path + " -p wa -k " + audit_key)
def test_prefilter_cmd(tags_to_apply, get_configuration, configure_environment, check_prelink):
    """Check if prelink is installed and syscheck works.

    This test was implemented when prefilter_cmd could only be set with 'prelink'.
    It will have to be updated if prefilter_cmd is updated as well.

    * This test is intended to be used with valid prefilter configuration. Each execution of this
      test will configure the environment properly, restart the service and wait for the initial scan.
    """
    check_apply_test(tags_to_apply, get_configuration['tags'])

    # Hoist the repeated metadata lookup into a local
    prefilter_cmd = get_configuration['metadata']['prefilter_cmd']
    if prefilter_cmd == '/usr/sbin/prelink -y':
        prelink = prefilter_cmd.split(' ')[0]
        # Fix: the original message was an f-string with no placeholders (F541)
        assert os.path.exists(prelink), 'Prelink is not installed'

    truncate_file(LOG_FILE_PATH)
    restart_wazuh_daemon('ossec-syscheckd')
    detect_initial_scan(wazuh_log_monitor)
def test_sync_interval(get_configuration, configure_environment, restart_syscheckd):
    """Verify that synchronization checks take place at the expected time given SYNC_INTERVAL variable.

    This test is intended to be used with valid configuration files. Each execution of this test
    will configure the environment properly and restart the service.
    """
    def fresh_monitor():
        # Empty the log so the next monitor only sees new entries
        truncate_file(LOG_FILE_PATH)
        return FileMonitor(LOG_FILE_PATH)

    # Check if the test should be skipped
    check_apply_test({'sync_interval'}, get_configuration['tags'])

    sync_interval = get_configuration['metadata']['sync_interval']

    # A synchronization must happen right after the initial scan
    wazuh_log_monitor = fresh_monitor()
    detect_initial_scan(wazuh_log_monitor)
    wazuh_log_monitor.start(timeout=5, callback=callback_detect_synchronization)

    # Advancing a full interval must trigger another synchronization
    wazuh_log_monitor = fresh_monitor()
    TimeMachine.travel_to_future(time_to_timedelta(sync_interval))
    wazuh_log_monitor.start(timeout=5, callback=callback_detect_synchronization)

    # This should fail as we are only advancing half the time needed for synchronization to occur
    wazuh_log_monitor = fresh_monitor()
    TimeMachine.travel_to_future(time_to_timedelta(sync_interval) / 2)
    try:
        result = wazuh_log_monitor.start(timeout=1, callback=callback_detect_synchronization,
                                         accum_results=1).result()
        if result is not None:
            pytest.fail("Synchronization shouldn't happen at this point")
    except TimeoutError:
        return
def test_audit_key(audit_key, path, get_configuration, configure_environment, restart_syscheckd):
    """Check <audit_key> functionality by adding an audit rule and checking if alerts with that key
    are triggered when a file is created.

    Parameters
    ----------
    audit_key : str
        Name of the audit_key to monitor.
    path : str
        Path of the folder to be monitored.
    """
    logger.info('Applying the test configuration')
    check_apply_test({audit_key}, get_configuration['tags'])

    # Add watch rule
    add_rule_command = "auditctl -w " + path + " -p wa -k " + audit_key
    os.system(add_rule_command)

    # Restart Wazuh and wait for the initial FIM scan
    control_service('stop')
    truncate_file(LOG_FILE_PATH)
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('start')
    detect_initial_scan(wazuh_log_monitor)

    # Look for audit_key word
    create_file(REGULAR, path, "testfile")
    # Bug fix: `" ".join(add_rule_command)` iterated over the characters of the
    # command string, garbling the error message; pass the command string as-is.
    events = wazuh_log_monitor.start(timeout=30, callback=callback_audit_key, accum_results=1,
                                     error_message=f'Did not receive expected "Match audit_key ..." event '
                                                   f'with the command {add_rule_command}').result()
    assert audit_key in events

    # Remove watch rule
    os.system("auditctl -W " + path + " -p wa -k " + audit_key)
def test_multiple_keys(tags_to_apply, get_configuration, configure_environment, restart_syscheckd):
    """Check if FIM can detect every event when adding, modifying and deleting a subkey/value within
    multiple registry keys monitored in the same line.

    Only the first 64 registry keys should be monitored. These registry keys will be added in one
    single entry like this one:
        <windows_registry>testkey0, testkey1, ..., testkeyn</windows_registry>
    """
    check_apply_test(tags_to_apply, get_configuration['tags'])

    discarded = wazuh_log_monitor.start(
        timeout=global_parameters.default_timeout,
        callback=callback_max_registry_monitored,
        error_message='Did not receive expected '
                      '"Maximum number of registries to be monitored..." event.').result()
    assert discarded == expected_discarded, f'Discarded registry keys are not the expected ones.'

    # Registry scan only works in scheduled mode
    detect_initial_scan(wazuh_log_monitor)

    timeout = global_parameters.default_timeout
    multiple_keys_and_entries_keys(MAX_MONITORED_ONE_TAG, subkeys, wazuh_log_monitor, KEY, timeout=timeout)
    # These 2 seconds are needed to avoid overlapping between keys and values
    time.sleep(2)
    multiple_keys_and_entries_values(MAX_MONITORED_ONE_TAG, subkeys, wazuh_log_monitor, KEY, timeout=timeout)
def test_new_directory(tags_to_apply, get_configuration, configure_environment, restart_syscheckd):
    """Check that a new monitored directory generates events after the next scheduled scan.

    This test performs the following steps:
    - Monitor a directory that does not exist.
    - Create the directory with files inside. Check that this does not produce events in ossec.log.
    - Move time forward to the next scheduled scan.
    - Check that now creating files within the directory do generate events.

    Parameters
    ----------
    tags_to_apply : set
        Run test if matches with a configuration identifier, skip otherwise.
    """
    check_apply_test(tags_to_apply, get_configuration['tags'])

    if sys.platform == 'win32':
        detect_initial_scan(wazuh_log_monitor)
        # Wait for syscheck to realize the directories don't exist
        wazuh_log_monitor.start(timeout=10, callback=callback_non_existing_monitored_dir,
                                error_message='Monitoring discarded message not found')
        os.makedirs(directory_str, exist_ok=True, mode=0o777)
        time.sleep(windows_audit_interval + 0.5)
    else:
        detect_initial_scan(wazuh_log_monitor)
        # Creating the monitored directory with files must not raise events yet
        regular_file_cud(directory_str, wazuh_log_monitor, file_list=['file1', 'file2', 'file3'],
                         min_timeout=global_parameters.default_timeout, triggers_event=False)
        detect_initial_scan(wazuh_log_monitor)

    # Events of new CUD actions must be raised after the next scheduled scan
    regular_file_cud(directory_str, wazuh_log_monitor, file_list=['file4', 'file5', 'file6'],
                     min_timeout=40, triggers_event=True)
def generate_analysisd_yaml(n_events, modify_events):
    """Generate a YAML test-case file from live analysisd FIM traffic.

    Restarts the Wazuh daemons with a man-in-the-middle on the analysisd socket,
    creates/modifies/deletes one file per monitored directory, captures the
    resulting 'added'/'modified'/'deleted' events and dumps them as test cases
    into the module-level `yaml_file`.

    Parameters
    ----------
    n_events : int
        Expected number of events; used to size the capture timeouts.
    modify_events : int
        Number of 'modified' events to accumulate before moving on.

    Returns
    -------
    ManInTheMiddle
        The running MITM instance so the caller can stop it later.
    """
    def parse_events_into_yaml(requests, yaml_file):
        # Convert each captured (raw request, parsed event) pair into a YAML
        # test case of the form "agent <id> syscheck <mode> <payload>".
        yaml_result = []
        with open(yaml_file, 'a') as y_f:
            id_ev = 0
            for req, event in requests:
                type_ev = event['data']['type']
                stage_ev = type_ev.title()
                mode = None
                agent_id = callback_analysisd_agent_id(req) or '000'
                # Strip fields that are not part of the expected output payload.
                del event['data']['mode']
                del event['data']['type']
                if 'tags' in event['data']:
                    del event['data']['tags']
                if type_ev == 'added':
                    mode = 'save2'
                    output_ev = json.dumps(event['data'])
                elif type_ev == 'deleted':
                    mode = 'delete'
                    # Deletions carry only the path, without surrounding quotes.
                    output_ev = json.dumps(event['data']['path']).replace('"', '')
                elif type_ev == 'modified':
                    mode = 'save2'
                    for field in ['old_attributes', 'changed_attributes', 'content_changes']:
                        if field in event['data']:
                            del event['data'][field]
                    output_ev = json.dumps(event['data'])
                yaml_result.append({
                    'name': f"{stage_ev}{id_ev}",
                    'test_case': [{
                        'input': f"{req}",
                        'output': f"agent {agent_id} syscheck {mode} {output_ev}",
                        'stage': f"{stage_ev}"
                    }]
                })
                id_ev += 1
            y_f.write(yaml.safe_dump(yaml_result))

    def remove_logs():
        # Wipe every file under the Wazuh logs directory for a clean capture.
        for root, dirs, files in os.walk(WAZUH_LOGS_PATH):
            for file in files:
                os.remove(os.path.join(root, file))

    file = 'regular'

    # Restart syscheckd with the new configuration
    truncate_file(LOG_FILE_PATH)
    file_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('stop')
    check_daemon_status(running=False)
    remove_logs()

    control_service('start', daemon='wazuh-db', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-db')

    control_service('start', daemon='wazuh-analysisd', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-analysisd')

    # Intercept the analysisd socket before syscheckd starts producing events.
    mitm_analysisd = ManInTheMiddle(address=analysis_path, family='AF_UNIX', connection_protocol='UDP')
    analysis_queue = mitm_analysisd.queue
    mitm_analysisd.start()

    control_service('start', daemon='wazuh-syscheckd', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-syscheckd')

    # Wait for initial scan
    detect_initial_scan(file_monitor)

    analysis_monitor = QueueMonitor(analysis_queue)

    # Phase 1: create one file per directory and collect the 'added' events.
    for directory in directories_list:
        create_file(REGULAR, directory, file, content='')
        time.sleep(0.01)
    added = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                   callback=callback_analysisd_event,
                                   accum_results=len(directories_list)).result()
    logger.debug('"added" alerts collected.')

    # Phase 2: modify the files and collect the 'modified' events.
    for directory in directories_list:
        modify_file(directory, file, new_content='Modified')
        time.sleep(0.01)
    modified = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                      callback=callback_analysisd_event,
                                      accum_results=modify_events).result()
    logger.debug('"modified" alerts collected.')

    # Phase 3: delete the files and collect the 'deleted' events.
    for directory in directories_list:
        delete_file(directory, file)
        time.sleep(0.01)
    deleted = analysis_monitor.start(timeout=max(0.01 * len(directories_list), 10),
                                     callback=callback_analysisd_event,
                                     accum_results=len(directories_list)).result()
    logger.debug('"deleted" alerts collected.')

    # Truncate file
    with open(yaml_file, 'w') as y_f:
        y_f.write(f'---\n')

    for ev_list in [added, modified, deleted]:
        parse_events_into_yaml(ev_list, yaml_file)
    logger.debug(f'YAML done: "{yaml_file}"')
    return mitm_analysisd
def wait_for_fim_start_function(get_configuration, request):
    """Wait for the end of the initial FIM scan.

    NOTE(review): the previous docstring also claimed realtime/whodata handling,
    but this function only waits for the initial scan. `get_configuration` is
    unused here — presumably kept for fixture resolution order; confirm before
    removing.
    """
    file_monitor = getattr(request.module, 'wazuh_log_monitor')
    fim.detect_initial_scan(file_monitor)
def wait_for_initial_scan(get_configuration, request):
    """Wait for the initial FIM scan to end, using the module's log monitor."""
    detect_initial_scan(getattr(request.module, 'wazuh_log_monitor'))