from wazuh_testing.tools.configuration import load_wazuh_configurations, check_apply_test
from wazuh_testing.tools.monitoring import FileMonitor

# Marks

pytestmark = [pytest.mark.linux, pytest.mark.tier(level=1)]

# Variables

test_directories = []
testdir = os.path.join(PREFIX, 'testdir')
filename = 'testfile'
test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
configurations_path = os.path.join(test_data_path, 'wazuh_conf.yaml')
wazuh_log_monitor = FileMonitor(fim.LOG_FILE_PATH)
wazuh_alert_monitor = FileMonitor(ALERT_FILE_PATH)

# Configurations

p, m = fim.generate_params(extra_params={'TEST_DIRECTORIES': testdir}, modes=['whodata'])
configurations = load_wazuh_configurations(configurations_path, __name__, params=p, metadata=m)

# fixtures

@pytest.fixture(scope='module', params=configurations)
pytestmark = [pytest.mark.linux, pytest.mark.sunos5, pytest.mark.darwin, pytest.mark.tier(level=1)]

# configurations

conf_params, conf_metadata = generate_params(extra_params={'FOLLOW_MODE': 'yes'})
configurations = load_wazuh_configurations(configurations_path, __name__, params=conf_params,
                                           metadata=conf_metadata)
wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

# fixtures

@pytest.fixture(scope='module', params=configurations)
def get_configuration(request):
    """Get configurations from the module."""
    return request.param

# tests

@pytest.mark.parametrize('tags_to_apply, main_folder', [
    ({'monitored_file'}, testdir1),
def reset_ossec_log(get_configuration, request):
    # Reset ossec.log and start a new monitor
    truncate_file(LOG_FILE_PATH)
    file_monitor = FileMonitor(LOG_FILE_PATH)
    setattr(request.module, 'wazuh_log_monitor', file_monitor)
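
# Usage sketch (hypothetical test, names illustrative): because the fixture above
# rebinds the module-level 'wazuh_log_monitor' via setattr, a test only needs to
# request the fixture and can then use the refreshed monitor from its own module:
#
# def test_detects_event(get_configuration, configure_environment, reset_ossec_log):
#     wazuh_log_monitor.start(timeout=10, callback=some_callback)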
""" if case == Cases.case0.value: alerts = open(ALERT_FILE_PATH, 'w') alerts.close() # Detect that the agent are been restarted def callback_detect_agent_restart(line): try: return re.match( rf'.*\"agent\":{{\"id\":\"({agent_id.zfill(3)})\".*', line).group(1) except (IndexError, AttributeError): pass FileMonitor(ALERT_FILE_PATH).start( timeout=max_time_for_agent_setup, callback=callback_detect_agent_restart).result() checksum = modify_database(agent_id, **database_params) while True: actual_n_attempts = n_attempts(agent_id) actual_n_completions = n_completions(agent_id) if not agents_dict[agent_id]['start'] and actual_n_attempts > 0: lock = Lock() lock.acquire() agents_dict[agent_id]['start'] = np.float32(time.time()) lock.release() attempts_info['start'] = True logger.info( f'Agent {agent_id} started at {agents_dict[agent_id]["start"]}' )
def test_wpk_manager(set_debug_mode, get_configuration, configure_environment,
                     restart_service, configure_agents):
    metadata = get_configuration.get('metadata')
    protocol = metadata['protocol']
    expected_status = metadata['status']
    sender = Sender(SERVER_ADDRESS, protocol=protocol)
    log_monitor = FileMonitor(LOG_FILE_PATH)
    expected_error_msg = metadata.get('error_msg')
    sha_list = metadata.get('sha_list')
    injectors = []
    file_name = ''
    installer = ''

    if 'VALIDSHA1' in sha_list:
        sha_list = get_sha_list(metadata)

    command = 'upgrade'
    if metadata.get('command') == 'upgrade_custom':
        command = 'upgrade_custom'
        if not expected_error_msg or ('The WPK file does not exist' not in expected_error_msg):
            file_name = metadata.get('message_params').get('file_path')
            file = os.path.join(UPGRADE_PATH, file_name)
            create_wpk_custom_file(file)
            metadata['message_params']['file_path'] = file
            sha_list = [hashlib.sha1(open(file, 'rb').read()).hexdigest()]
        if metadata.get('message_params').get('installer'):
            installer = metadata.get('message_params').get('installer')
        else:
            installer = 'upgrade.sh'

    for index, agent in enumerate(agents):
        agent.set_wpk_variables(sha_list[index],
                                metadata['upgrade_exec_result'][index],
                                metadata['upgrade_notification'][index],
                                metadata['upgrade_script_result'][index],
                                stage_disconnect=metadata['stage_disconnect'][index])
        injector = Injector(sender, agent)
        injectors.append(injector)
        injector.run()
        if protocol == "tcp":
            sender = Sender(manager_address=SERVER_ADDRESS, protocol=protocol)

    agents_id = [int(x.id) for x in agents]
    data = {
        'command': command,
        'parameters': {'agents': agents_id}
    }

    # If the test case provides message parameters, add them to the request
    if metadata.get('message_params'):
        data['parameters'].update(metadata.get('message_params'))

    # Remove the WPK package when the test checks the 'use_http' or 'version' options
    if metadata.get('checks') and ('use_http' in metadata.get('checks') or 'version' in metadata.get('checks')):
        remove_wpk_package()

    # Give time for the registration key to be available and send a few heartbeats
    time.sleep(40)

    # Send the upgrade request
    response = send_message(data, UPGRADE_SOCKET)

    if metadata.get('checks') and (('use_http' in metadata.get('checks')) or ('version' in metadata.get('checks'))):
        # Check the http option or version in the logs
        try:
            log_monitor.start(timeout=60, callback=wait_download)
        except TimeoutError:
            raise AssertionError("Timed out waiting for the WPK download log!")
        last_log = log_monitor.result()

        if 'use_http' in metadata.get('checks'):
            if metadata.get('message_params') and metadata.get('message_params').get('use_http'):
                assert "'http://" in last_log, "Use http protocol did not match expected! Expected 'http://'"
            else:
                assert "'https://" in last_log, "Use http protocol did not match expected! Expected 'https://'"

        if 'version' in metadata.get('checks'):
            if metadata.get('message_params') and metadata.get('message_params').get('version'):
                assert metadata.get('message_params').get('version') in last_log, \
                    f'Versions did not match expected! ' \
                    f'Expected {metadata.get("message_params").get("version")}'
            else:
                assert MANAGER_VERSION in last_log, \
                    f'Versions did not match expected! Expected {MANAGER_VERSION}'

    # Allow time for the WPK download to finish
    try:
        log_monitor.start(timeout=600, callback=wait_downloaded)
    except TimeoutError:
        raise AssertionError("Timed out waiting for the WPK download to finish!")

    if metadata.get('checks') and ('chunk_size' in metadata.get('checks')):
        # Check the chunk size in the logs
        try:
            log_monitor.start(timeout=60, callback=wait_chunk_size)
        except TimeoutError:
            raise AssertionError("Timed out waiting for the chunk size log!")
        chunk = metadata.get('chunk_size')
        last_log = log_monitor.result()
        assert f'com write {chunk}' in last_log, \
            f'Chunk size did not match expected! Expected {chunk} obtained {last_log}'

    if metadata.get('checks') and ('wpk_name' in metadata.get('checks')):
        # Check the custom WPK name in the logs
        try:
            log_monitor.start(timeout=180, callback=wait_wpk_custom)
        except TimeoutError:
            raise AssertionError("Timed out waiting for the custom WPK log!")
        last_log = log_monitor.result()
        assert f'com upgrade {file_name} {installer}' in last_log, \
            f'Wpk custom package did not match expected! ' \
            f'Expected {metadata.get("message_params").get("file_path")} obtained {last_log}'

    if metadata.get('first_attempt'):
        # Check that the result of the first attempt is Success
        assert 'Success' == response['data'][0]['message'], \
            f'First upgrade response did not match expected! ' \
            f'Expected {metadata.get("expected_response")} obtained {response["data"][0]["message"]}'

        repeat_message = data

        # Continue with the validations of the first attempt
        task_ids = [item.get('agent') for item in response['data']]
        for index, agent_id in enumerate(task_ids):
            data = {
                "origin": {"module": "api"},
                "command": 'upgrade_result',
                "parameters": {"agents": [agent_id]}
            }
            time.sleep(30)
            response = send_message(data, TASK_SOCKET)
            retries = 0
            while (response['data'][0]['status'] != metadata.get('first_attempt')) and (retries < 10):
                time.sleep(30)
                response = send_message(data, TASK_SOCKET)
                retries += 1
            assert metadata.get('first_attempt') == response['data'][0]['status'], \
                f'First upgrade status did not match expected! ' \
                f'Expected {metadata.get("first_attempt")} obtained {response["data"][0]["status"]}'

        # Send the upgrade request again
        response = send_message(repeat_message, UPGRADE_SOCKET)

    if metadata.get('expected_response') == 'Success':
        # Check that the result matches the expected response
        assert metadata.get('expected_response') == response['data'][0]['message'], \
            f'Upgrade response did not match expected! ' \
            f'Expected {metadata.get("expected_response")} obtained {response["data"][0]["message"]}'

        # Continue with the test validations
        task_ids = [item.get('agent') for item in response['data']]
        for index, agent_id in enumerate(task_ids):
            data = {
                "origin": {"module": "api"},
                "command": 'upgrade_result',
                "parameters": {"agents": [agent_id]}
            }
            time.sleep(30)
            response = send_message(data, TASK_SOCKET)
            retries = 0
            while response['data'][0]['status'] == 'Updating' and retries < 30 and \
                    response['data'][0]['status'] != expected_status[index]:
                time.sleep(30)
                response = send_message(data, TASK_SOCKET)
                retries += 1
            assert expected_status[index] == response['data'][0]['status'], \
                f'Upgrade status did not match expected! ' \
                f'Expected {expected_status[index]} obtained {response["data"][0]["status"]} at index {index}'
            if expected_status[index] == 'Error':
                assert expected_error_msg[index] == response['data'][0]['error_msg'], \
                    f'Error msg did not match expected! ' \
                    f'Expected {expected_error_msg[index]} obtained {response["data"][0]["error_msg"]} at index {index}'
    else:
        assert metadata.get('expected_response') == response['data'][0]['message'], \
            f'Upgrade response did not match expected! ' \
            f'Expected {metadata.get("expected_response")} obtained {response["data"][0]["message"]}'

    for injector in injectors:
        injector.stop_receive()

    time.sleep(3)  # Wait for agent threads to stop
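
# 'send_message' is a helper defined elsewhere in this module. A minimal sketch of
# how such a helper could talk to the upgrade/task Unix sockets is shown below. It
# assumes the Wazuh framing convention of a 4-byte little-endian payload-length
# prefix; the name 'send_message_sketch' and the framing detail are assumptions to
# verify against the actual helper:
import json
import socket
import struct

def send_message_sketch(data_object, socket_path):
    payload = json.dumps(data_object).encode()
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(socket_path)
    # Length-prefixed frame: 4-byte little-endian size, then the JSON payload
    sock.send(struct.pack('<I', len(payload)) + payload)
    # The response is framed the same way
    length = struct.unpack('<I', sock.recv(4))[0]
    response = json.loads(sock.recv(length).decode())
    sock.close()
    return response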
def generate_analysisd_yaml(n_events, modify_events):
    def parse_events_into_yaml(requests, yaml_file):
        yaml_result = []
        with open(yaml_file, 'a') as y_f:
            id_ev = 0
            for req, event in requests:
                type_ev = event['data']['type']
                stage_ev = type_ev.title()
                mode = None
                agent_id = callback_analysisd_agent_id(req) or '000'

                del event['data']['mode']
                del event['data']['type']
                if 'tags' in event['data']:
                    del event['data']['tags']

                if type_ev == 'added':
                    mode = 'save2'
                    output_ev = json.dumps(event['data'])
                elif type_ev == 'deleted':
                    mode = 'delete'
                    output_ev = json.dumps(event['data']['path']).replace('"', '')
                elif type_ev == 'modified':
                    mode = 'save2'
                    for field in ['old_attributes', 'changed_attributes', 'content_changes']:
                        if field in event['data']:
                            del event['data'][field]
                    output_ev = json.dumps(event['data'])

                yaml_result.append({
                    'name': f"{stage_ev}{id_ev}",
                    'test_case': [
                        {
                            'input': f"{req}",
                            'output': f"agent {agent_id} syscheck {mode} {output_ev}",
                            'stage': f"{stage_ev}"
                        }
                    ]
                })
                id_ev += 1
            y_f.write(yaml.safe_dump(yaml_result))

    def remove_logs():
        for root, dirs, files in os.walk(WAZUH_LOGS_PATH):
            for file in files:
                os.remove(os.path.join(root, file))

    file = 'regular'

    # Restart syscheckd with the new configuration
    truncate_file(LOG_FILE_PATH)
    file_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('stop')
    check_daemon_status(running=False)
    remove_logs()

    control_service('start', daemon='wazuh-db', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-db')

    control_service('start', daemon='ossec-analysisd', debug_mode=True)
    check_daemon_status(running=True, daemon='ossec-analysisd')

    mitm_analysisd = ManInTheMiddle(address=analysis_path, family='AF_UNIX', connection_protocol='UDP')
    analysis_queue = mitm_analysisd.queue
    mitm_analysisd.start()

    control_service('start', daemon='ossec-syscheckd', debug_mode=True)
    check_daemon_status(running=True, daemon='ossec-syscheckd')

    # Wait for the initial scan
    detect_initial_scan(file_monitor)

    analysis_monitor = QueueMonitor(analysis_queue)

    for directory in directories_list:
        create_file(REGULAR, directory, file, content='')
        time.sleep(0.01)
    added = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                   callback=callback_analysisd_event,
                                   accum_results=len(directories_list)).result()
    logger.debug('"added" alerts collected.')

    for directory in directories_list:
        modify_file(directory, file, new_content='Modified')
        time.sleep(0.01)
    modified = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                      callback=callback_analysisd_event,
                                      accum_results=modify_events).result()
    logger.debug('"modified" alerts collected.')

    for directory in directories_list:
        delete_file(directory, file)
        time.sleep(0.01)
    deleted = analysis_monitor.start(timeout=max(0.01 * len(directories_list), 10),
                                     callback=callback_analysisd_event,
                                     accum_results=len(directories_list)).result()
    logger.debug('"deleted" alerts collected.')

    # Truncate the file and dump the parsed events
    with open(yaml_file, 'w') as y_f:
        y_f.write('---\n')

    for ev_list in [added, modified, deleted]:
        parse_events_into_yaml(ev_list, yaml_file)
    logger.debug(f'YAML done: "{yaml_file}"')

    return mitm_analysisd
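
# For reference, each captured event is dumped by parse_events_into_yaml as a test
# case shaped like the following (structure taken from the code above; the concrete
# values are illustrative only):
#
#   - name: Added0
#     test_case:
#       - input: '...raw syscheck event as received by analysisd...'
#         output: agent 000 syscheck save2 {"path": "/testdir0/regular", ...}
#         stage: Added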
    # Reset client.keys
    clean_client_keys_file()

    # Start Wazuh
    control_service('start')

    # Wait until agentd has begun
    def callback_agentd_startup(line):
        if 'Accepting connections on port 1515' in line:
            return line
        return None

    log_monitor = FileMonitor(LOG_FILE_PATH)
    log_monitor.start(timeout=30, callback=callback_agentd_startup)


# @pytest.mark.parametrize('test_case', [case['test_case'] for case in ssl_configuration_tests])
def test_ossec_auth_name_ip_pass(get_configuration, configure_environment, configure_sockets_environment):
    """Check that every input message on the authd port generates the expected output.

    Parameters
    ----------
    test_case : list
        List of test cases, each a dict with the following keys:
        - input: message that will be sent to the manager
        - output: expected response
        - insert_prev_agent: yes or no (for duplicated IP or name cases)
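
# A hypothetical ssl_configuration_tests entry matching the keys documented above
# (all values are illustrative only):
#
#   - name: "Duplicate agent name"
#     test_case:
#       - input: "OSSEC A:'agent1'"
#         output: "OSSEC K:'"
#         insert_prev_agent: "yes"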
from wazuh_testing import global_parameters
from wazuh_testing.analysis import callback_fim_alert, callback_analysisd_message, validate_analysis_alert, \
    callback_wazuh_db_message
from wazuh_testing.tools import WAZUH_LOGS_PATH, WAZUH_PATH, LOG_FILE_PATH
from wazuh_testing.tools.monitoring import FileMonitor

# marks

pytestmark = [pytest.mark.linux, pytest.mark.tier(level=0)]

# variables

test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
alerts_json = os.path.join(WAZUH_LOGS_PATH, 'alerts', 'alerts.json')
wazuh_log_monitor = FileMonitor(alerts_json)
messages_path = os.path.join(test_data_path, 'event_messages.yaml')
with open(messages_path) as f:
    test_cases = yaml.safe_load(f)
wdb_path = os.path.join(WAZUH_PATH, 'queue', 'db', 'wdb')
analysis_path = os.path.join(WAZUH_PATH, 'queue', 'ossec', 'queue')

# These variables will be set in the create_unix_sockets fixture
monitored_sockets, receiver_sockets = None, None
monitored_sockets_params = [(wdb_path, 'TCP')]
receiver_sockets_params = [(analysis_path, 'UDP')]
analysis_monitor = None
wdb_monitor = None

# tests
import os

import jsonschema
import pytest

from wazuh_testing.mitre import (callback_detect_mitre_event, validate_mitre_event)
from wazuh_testing.tools import ALERT_FILE_PATH
from wazuh_testing.tools.monitoring import FileMonitor

# Marks

pytestmark = [pytest.mark.linux, pytest.mark.tier(level=0), pytest.mark.server]

# variables

wazuh_alert_monitor = FileMonitor(ALERT_FILE_PATH)
_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')

invalid_configurations = []
configurations = []
# Build the list of configuration files test1.xml ... test14.xml; cases 5 to 8 are invalid
for i in range(1, 15):
    file_test = os.path.join(_data_path, f"test{i}.xml")
    configurations.append(file_test)
    if i in range(5, 9):
        invalid_configurations.append(file_test)

# fixtures

@pytest.fixture(scope='module', params=configurations)
def test_execd_restart(set_debug_mode, get_configuration, test_version,
                       configure_environment, start_agent, set_ar_conf_mode):
    """Check if the restart-wazuh Active Response is executed correctly.

    Args:
        set_debug_mode (fixture): Set the execd daemon in debug mode.
        get_configuration (fixture): Get configurations from the module.
        test_version (fixture): Validate the Wazuh version.
        configure_environment (fixture): Configure a custom environment for testing.
        start_agent (fixture): Create Remoted and Authd simulators, register the agent and start it.
        set_ar_conf_mode (fixture): Configure the Active Responses used in tests.
    """
    metadata = get_configuration['metadata']
    expected = metadata['results']
    ossec_log_monitor = FileMonitor(LOG_FILE_PATH)
    ar_log_monitor = FileMonitor(execd.AR_LOG_FILE_PATH)

    # Check AR in ossec logs
    ossec_log_monitor.start(timeout=60, callback=execd.wait_received_message_line)

    # Check AR in active-response logs
    ar_log_monitor.start(timeout=60, callback=execd.wait_start_message_line)

    if expected['success']:
        ar_log_monitor.start(timeout=60, callback=wait_message_line)

        # Check the shutdown message in ossec logs
        ossec_log_monitor.start(timeout=60, callback=wait_shutdown_message_line)
        ar_log_monitor.start(timeout=60, callback=execd.wait_ended_message_line)
    else:
        ar_log_monitor.start(timeout=60, callback=wait_invalid_input_message_line)
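
# The wait_* callbacks used above (wait_message_line, wait_shutdown_message_line,
# wait_invalid_input_message_line) are defined elsewhere in this module. They follow
# the usual FileMonitor line-matching shape seen in the other tests; a hypothetical
# example (the matched substring is illustrative only):
#
# def wait_shutdown_message_line(line):
#     return True if "Shutdown received" in line else None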
def recursion_test(dirname, subdirname, recursion_level, timeout=1, edge_limit=2,
                   ignored_levels=1, is_scheduled=False):
    """Check that events are generated in the first and last `edge_limit` directory levels of the hierarchy
    dirname/subdirname1/.../subdirname{recursion_level}.

    It also checks that no events are generated for subdirname{recursion_level+ignored_levels}. All needed
    directories and subdirectories will be created using the information provided by the parameters.

    Example:
        recursion_level = 10
        edge_limit = 2
        ignored_levels = 2
        dirname = "/testdir"
        subdirname = "subdir"

    With those parameters, this function will create files and expect to detect 'added', 'modified' and
    'deleted' events for the following directories only, as they are the first and last 2 subdirectories
    within recursion level 10:

        /testdir/subdir1
        /testdir/subdir1/subdir2
        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/
        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/subdir10

    As ignored_levels is 2, this function will also create files in the following directories and ensure
    that no events are raised, as they are outside the specified recursion level:

        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/subdir10/subdir11
        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/subdir10/subdir11/subdir12

    This function also takes into account that a very long path will raise a FileNotFoundError on Windows
    because of its path length limitations. Similarly, on Linux environments an `Event Too Long` message
    will be raised by audit if the path name is too long.

    Parameters
    ----------
    dirname : str
        The path being monitored by syscheck (indicated in the .conf file).
    subdirname : str
        The name of the subdirectories that will be created during the execution for testing purposes.
    recursion_level : int
        Recursion level. Also used as the number of subdirectories to be created and checked for the current test.
    timeout : int
        Max time to wait until an event is raised.
    edge_limit : int
        Number of directories where the test will monitor events.
    ignored_levels : int
        Number of directories exceeding the specified recursion_level, used to verify that events are not raised.
    is_scheduled : bool
        If True, the internal date will be modified to trigger scheduled checks by syscheck.
        False if realtime or whodata.
""" path = dirname try: # Check True (Within the specified recursion level) for n in range(recursion_level): path = os.path.join(path, subdirname + str(n + 1)) if ((recursion_level < edge_limit * 2) or (recursion_level >= edge_limit * 2 and n < edge_limit) or (recursion_level >= edge_limit * 2 and n > recursion_level - edge_limit)): regular_file_cud(path, wazuh_log_monitor, time_travel=is_scheduled, min_timeout=timeout) # Check False (exceeding the specified recursion_level) for n in range(recursion_level, recursion_level + ignored_levels): path = os.path.join(path, subdirname + str(n + 1)) regular_file_cud(path, wazuh_log_monitor, time_travel=is_scheduled, min_timeout=timeout, triggers_event=False) except TimeoutError: timeout_log_monitor = FileMonitor(LOG_FILE_PATH) if timeout_log_monitor.start( timeout=5, callback=callback_audit_event_too_long).result(): pytest.fail("Audit raised 'Event Too Long' message.") raise except FileNotFoundError as ex: MAX_PATH_LENGTH_WINDOWS_ERROR = 206 if ex.winerror != MAX_PATH_LENGTH_WINDOWS_ERROR: raise except OSError as ex: MAX_PATH_LENGTH_MACOS_ERROR = 63 MAX_PATH_LENGTH_SOLARIS_ERROR = 78 if ex.errno not in (MAX_PATH_LENGTH_SOLARIS_ERROR, MAX_PATH_LENGTH_MACOS_ERROR): raise