Example no. 1
def test_agent_remote_configuration(agent_name, get_configuration,
                                    configure_environment, remove_shared_files,
                                    restart_remoted, create_agent_group):
    """Check if the agents correctly send their version, receive the shared configuration, and finally,
        the start-up message is received and processed by the manager.

    Raises:
        AssertionError: if `wazuh-db` returns a wrong agent version, agents do not receive shared configuration or
        startup message after agent restart is not received.
    """

    protocols = get_configuration['metadata']['protocol']

    for protocol in protocols.split(","):
        agent = ag.Agent(**agent_info[agent_name])

        # Sleep to avoid ConnectionRefusedError
        sleep(1)

        sender = ag.Sender(agent_info[agent_name]['manager_address'],
                           protocol=protocol)

        check_push_shared_config(agent, sender)

        wazuh_db_agent_version = agent.get_agent_version()
        assert wazuh_db_agent_version == fr"Wazuh {agent_info[agent_name]['version']}"

        wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
        log_callback = remote.callback_start_up(agent.name)
        wazuh_log_monitor.start(
            timeout=10,
            callback=log_callback,
            error_message='The start up message has not been found in the logs'
        )
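# A minimal sketch of the callback pattern that FileMonitor expects (the matched substring
# below is an assumption used only for illustration, not a real Wazuh log message): the
# monitor feeds each new ossec.log line to the callback and stops as soon as the callback
# returns a non-None value.
def callback_contains(expected_substring):
    def callback(line):
        if expected_substring in line:
            return line
        return None
    return callback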
def check_configuration_age_valid(cfg):
    """Check if the Wazuh module runs correctly and analyze the desired file.

    Ensure logcollector is running with the specified configuration, analyzing the designated file and,
    in the case of the Wazuh server, check if the API answer for localfile configuration block coincides
    the selected configuration.

    Args:
        cfg (dict): Dictionary with the localfile configuration.

    Raises:
        TimeoutError: If the "Analyzing file" callback is not generated.
        AssertionError: If, in the case of a server instance, the API response differs from the real configuration.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    log_callback = logcollector.callback_analyzing_file(cfg['location'],
                                                        prefix=prefix)
    wazuh_log_monitor.start(
        timeout=5,
        callback=log_callback,
        error_message=logcollector.GENERIC_CALLBACK_ERROR_ANALYZING_FILE)
    if wazuh_component == 'wazuh-manager':
        real_configuration = cfg.copy()
        real_configuration.pop('valid_value')
        api.wait_until_api_ready()
        api.compare_config_api_response([real_configuration], 'localfile')
def override_wazuh_conf(configuration, set_password):
    # Stop wazuh-authd
    control_service('stop', daemon='wazuh-authd')
    time.sleep(1)
    check_daemon_status(running=False, daemon='wazuh-authd')
    truncate_file(LOG_FILE_PATH)

    # Configuration for testing
    test_config = set_section_wazuh_conf(configuration.get('sections'))
    # Set new configuration
    write_wazuh_conf(test_config)

    # reset_client_keys
    clean_client_keys_file()
    # reset password
    reset_password(set_password)

    time.sleep(1)
    # Start wazuh-authd
    control_service('start', daemon='wazuh-authd')
    # Wait until authd is accepting connections
    def callback_agentd_startup(line):
        if 'Accepting connections on port 1515' in line:
            return line
        return None

    log_monitor = FileMonitor(LOG_FILE_PATH)
    log_monitor.start(timeout=30, callback=callback_agentd_startup)
    time.sleep(1)
Example no. 4
def test_agentd_state_config(configure_environment, test_case: list):

    control_service('stop', 'wazuh-agentd')

    # Truncate ossec.log in order to watch it correctly
    truncate_file(LOG_FILE_PATH)

    # Remove state file to check if agent behavior is as expected
    if os.path.exists(state_file_path):
        os.remove(state_file_path)

    # Set state interval value according to test case specs
    set_state_interval(test_case['interval'], internal_options)

    control_service('start', 'wazuh-agentd')

    # Check if the test requires checking the state file existence
    if 'state_file_exist' in test_case:
        if test_case['state_file_exist']:
            # Wait until state file was dumped
            time.sleep(test_case['interval'])
        assert test_case['state_file_exist'] == os.path.exists(state_file_path)

    # Follow ossec.log to find desired messages by a callback
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
    wazuh_log_monitor.start(timeout=global_parameters.default_timeout,
                            callback=callbacks.get(test_case['log_expect']),
                            error_message='Event not found')
    assert wazuh_log_monitor.result()

    # Check if the test requires checking the agentd status
    if 'agentd_ends' in test_case:
        assert (test_case['agentd_ends']
                is not check_if_process_is_running('wazuh-agentd'))
Example no. 5
def check_client_keys_file():
    """Wait until client key has been written"""
    def wait_key_changes(line):
        if 'Valid key received' in line:
            return line
        return None

    log_monitor = FileMonitor(LOG_FILE_PATH)
    try:
        log_monitor.start(timeout=6, callback=wait_key_changes)
    except TimeoutError:
        pass
    try:
        with open(CLIENT_KEYS_PATH) as client_file:
            client_line = client_file.readline()
            # Check the key format: 4 fields (id name ip key)
            if len(client_line.split(" ")) != 4:
                return False
            return f"OSSEC K:'{client_line[:-1]}'\n"
    except IOError:
        raise
def check_ignore_binaries_valid(cfg):
    """Check if the Wazuh runs correctly with the specified ignore_binaries field value.

    Ensure logcollector allows the specified ignore_binaries attribute. Also, in the case of the manager instance,
    check if the API answer for localfile block coincides.

    Args:
        cfg (dict): Dictionary with the localfile configuration.

    Raises:
        TimeoutError: If, on a Windows system, the callback for an invalid location pattern is not generated.
        AssertionError: If, in the case of a server instance, the API response differs from the real configuration.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    if sys.platform == 'win32':
        log_callback = logcollector.callback_invalid_location_pattern(
            cfg['location'], prefix=prefix)
        wazuh_log_monitor.start(
            timeout=5,
            callback=log_callback,
            error_message=logcollector.GENERIC_CALLBACK_ERROR_INVALID_LOCATION)

    if wazuh_component == 'wazuh-manager':
        real_configuration = cfg.copy()
        real_configuration.pop('valid_value')
        api.wait_until_api_ready()
        api.compare_config_api_response([real_configuration], 'localfile')
    else:
        if sys.platform == 'win32':
            assert get_process_cmd('wazuh-agent.exe') != 'None'
        else:
            assert check_if_process_is_running('wazuh-logcollector')
Example no. 7
def wait_for_analysisd_startup(request):
    """Wait until analysisd has begun and alerts.json is created."""
    def callback_analysisd_startup(line):
        if 'Input message handler thread started.' in line:
            return line
        return None

    log_monitor = FileMonitor(LOG_FILE_PATH)
    log_monitor.start(timeout=30, callback=callback_analysisd_startup)
Example no. 8
def test_skip_proc(get_configuration, configure_environment, restart_syscheckd,
                   wait_for_initial_scan):
    """Check if syscheckd skips /proc when setting 'skip_proc="yes"'."""
    check_apply_test({'skip_proc'}, get_configuration['tags'])
    trigger = get_configuration['metadata']['skip'] == 'no'

    if trigger:
        proc = subprocess.Popen([
            "python3",
            f"{os.path.dirname(os.path.abspath(__file__))}/data/proc.py"
        ])

        # Change configuration, monitoring the PID path in /proc
        # Monitor only /proc/PID to expect only these events. Otherwise, it will fail due to Timeouts since
        # integrity scans will take too long
        new_conf = change_conf(f'/proc/{proc.pid}')
        new_ossec_conf = []

        # Get new skip_proc configuration
        for conf in new_conf:
            if conf['metadata']['skip'] == 'no' and conf['tags'] == [
                    'skip_proc'
            ]:
                new_ossec_conf = set_section_wazuh_conf(conf.get('sections'))
        restart_wazuh_with_new_conf(new_ossec_conf)
        truncate_file(LOG_FILE_PATH)
        proc_monitor = FileMonitor(LOG_FILE_PATH)
        detect_initial_scan(proc_monitor)

        # Do not expect any 'Sending event'
        with pytest.raises(TimeoutError):
            proc_monitor.start(
                timeout=3,
                callback=callback_detect_event,
                error_message=
                'Did not receive expected "Sending FIM event: ..." event')

        check_time_travel(time_travel=True, monitor=wazuh_log_monitor)

        found_event = False
        while not found_event:
            event = proc_monitor.start(
                timeout=5,
                callback=callback_detect_event,
                error_message='Did not receive expected '
                '"Sending FIM event: ..." event').result()
            if f'/proc/{proc.pid}/' in event['data'].get('path'):
                found_event = True

        # Kill the process
        subprocess.Popen(["kill", "-9", str(proc.pid)])

    else:
        with pytest.raises(TimeoutError):
            event = wazuh_log_monitor.start(
                timeout=3, callback=callback_detect_integrity_state)
            raise AttributeError(f'Unexpected event {event}')
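# Hypothetical sketch of what a helper like data/proc.py could look like (an assumption,
# not the real script): it only needs to stay alive so that its /proc/<PID> directory
# exists while syscheck monitors it.
import time

if __name__ == '__main__':
    while True:
        time.sleep(1)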
Example no. 9
def wait_for_agentd_startup(request):
    """Wait until agentd has begun"""

    def callback_agentd_startup(line):
        if 'Accepting connections on port 1515' in line:
            return line
        return None

    log_monitor = FileMonitor(LOG_FILE_PATH)
    log_monitor.start(timeout=30, callback=callback_agentd_startup)
def test_execd_firewall_drop(set_debug_mode, get_configuration, test_version,
                             configure_environment, remove_ip_from_iptables,
                             start_agent, set_ar_conf_mode):
    """Check if firewall-drop Active Response is executed correctly.

    Args:
        set_debug_mode (fixture): Set execd daemon in debug mode.
        test_version (fixture): Validate Wazuh version.
        set_ar_conf_mode (fixture): Configure Active Responses used in tests.
        start_agent (fixture): Create Remoted and Authd simulators, register agent and start it.
        remove_ip_from_iptables (fixture): Remove the test IP from iptables if it exists.
    """
    metadata = get_configuration['metadata']
    expected = metadata['results']
    ossec_log_monitor = FileMonitor(LOG_FILE_PATH)
    ar_log_monitor = FileMonitor(execd.AR_LOG_FILE_PATH)

    # Checking AR in ossec logs
    ossec_log_monitor.start(timeout=60,
                            callback=execd.wait_received_message_line)

    # Checking AR in active-response logs
    ar_log_monitor.start(timeout=60, callback=execd.wait_start_message_line)

    if expected['success']:
        for command_id in range(2):
            ar_log_monitor.start(timeout=60, callback=wait_message_line)
            last_log = ar_log_monitor.result()
            validate_ar_message(last_log, command_id)

            ar_log_monitor.start(timeout=60,
                                 callback=execd.wait_ended_message_line)

            # Checking if the IP was added/removed in iptables
            iptables_file = os.popen('iptables -L')
            flag = False
            for iptables_line in iptables_file:
                if metadata['ip'] in iptables_line:
                    flag = True

            if not flag and command_id == 0:
                raise AssertionError("IP was not added to iptables")
            elif flag and command_id == 1:
                raise AssertionError("IP was not deleted from iptables")

            time.sleep(5)
    else:
        ar_log_monitor.start(timeout=60,
                             callback=wait_invalid_input_message_line)
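# Design note (a sketch, not what the test above uses): the same iptables check could be
# done with subprocess.run and an argument list instead of os.popen('iptables -L').
import subprocess

def ip_in_iptables(ip):
    output = subprocess.run(['iptables', '-L'], capture_output=True, text=True).stdout
    return any(ip in rule_line for rule_line in output.splitlines())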
Example no. 11
def check_configuration_reconnect_time_valid():
    """Check if Wazuh module correctly runs and analyzes the desired eventchannel.

    Ensure logcollector is running with the specified configuration, analyzing the designate eventchannel.

    Raises:
        TimeoutError: If the "Analyzing eventchannel" callback is not generated.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    log_callback = logcollector.callback_eventchannel_analyzing('Security')
    wazuh_log_monitor.start(
        timeout=5,
        callback=log_callback,
        error_message="The expected error output has not been produced")
def check_time_to_connect(timeout):
    """Wait until client try connect.

    Args:
        timeout (int, optional): Maximum timeout. Default `-1`

    Returns:
        int: Integer with elapsed time in seconds.
    """
    def wait_connect(line):
        if 'Trying to connect to server' in line:
            return line
        return None

    log_monitor = FileMonitor(LOG_FILE_PATH)
    try:
        log_monitor.start(timeout=timeout + 2, callback=wait_connect)
    except TimeoutError:
        return -1

    final_line = log_monitor.result()
    initial_line = None
    elapsed_time = None

    with open(LOG_FILE_PATH, 'r') as log_file:
        lines = log_file.readlines()
        # find enrollment end
        for line in lines:
            if "INFO: Valid key received" in line:
                initial_line = line
                break

    if initial_line is not None and final_line is not None:
        form = '%H:%M:%S'
        initial_time = datetime.datetime.strptime(initial_line.split()[1],
                                                  form).time()
        final_time = datetime.datetime.strptime(final_line.split()[1],
                                                form).time()
        initial_delta = datetime.timedelta(hours=initial_time.hour,
                                           minutes=initial_time.minute,
                                           seconds=initial_time.second)
        final_delta = datetime.timedelta(hours=final_time.hour,
                                         minutes=final_time.minute,
                                         seconds=final_time.second)
        elapsed_time = (final_delta - initial_delta).total_seconds()

    return elapsed_time
def test_agentd_connection_retries_pre_enrollment(configure_authd_server,
                                                  configure_environment,
                                                  get_configuration):
    """Check how the agent behaves when Remoted is not available and performs multiple connection attempts to it.

    For this, the agent starts with keys but Remoted is not available for several seconds,
    then the agent performs multiple connection retries before requesting a new enrollment.

    Args:
        configure_authd_server (fixture): Initialize a simulated authd connection.
        stop_authd (fixture): Disable authd to accept connections and perform enrollments.
        set_keys (fixture): Write to client.keys file the agent's enrollment details.
        configure_environment (fixture): Configure a custom environment for testing.
        get_configuration (fixture): Get configurations from the module.
    """
    global remoted_server
    REMOTED_KEYS_SYNC_TIME = 10

    # Start Remoted mock
    remoted_server = RemotedSimulator(
        protocol=get_configuration['metadata']['PROTOCOL'],
        client_keys=CLIENT_KEYS_PATH)
    # Stop target Agent
    control_service('stop')
    # Clean logs
    truncate_file(LOG_FILE_PATH)
    # Prepare test
    stop_authd()
    set_keys()
    # Start hearing logs
    log_monitor = FileMonitor(LOG_FILE_PATH)
    # Start whole Agent service to check other daemons status after initialization
    control_service('start')
    # Simulate the time Remoted needs to synchronize keys by waiting before it starts responding
    remoted_server.set_mode('CONTROLLED_ACK')
    sleep(REMOTED_KEYS_SYNC_TIME)

    # Check Agentd is finally communicating
    log_monitor.start(
        timeout=120,
        callback=wait_notify,
        error_message="Notify message from agent was never sent!")
def check_configuration_age_invalid(cfg):
    """Check if the Wazuh fails because the invalid age configuration value.

    Args:
        cfg (dict): Dictionary with the localfile configuration.

    Raises:
        TimeoutError: If the error callbacks are not generated.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    if cfg['age'] in problematic_values:
        pytest.xfail(
            "Logcollector accepts invalid values. Issue: https://github.com/wazuh/wazuh/issues/8158"
        )

    log_callback = gc.callback_invalid_conf_for_localfile('age',
                                                          prefix,
                                                          severity='ERROR')
    wazuh_log_monitor.start(timeout=5,
                            callback=log_callback,
                            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)
    log_callback = gc.callback_error_in_configuration(
        'ERROR', prefix, conf_path=f'{wazuh_configuration}')
    wazuh_log_monitor.start(timeout=5,
                            callback=log_callback,
                            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)

    if sys.platform != 'win32':
        log_callback = gc.callback_error_in_configuration(
            'CRITICAL', prefix, conf_path=f'{wazuh_configuration}')
        wazuh_log_monitor.start(
            timeout=5,
            callback=log_callback,
            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)
def check_ignore_binaries_invalid(cfg):
    """Check if the Wazuh fails using a invalid ignore_binaries configuration value.

    Args:
        cfg (dict): Dictionary with the localfile configuration.

    Raises:
        TimeoutError: If error callbacks are not generated.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    log_callback = gc.callback_invalid_value('ignore_binaries',
                                             cfg['ignore_binaries'], prefix)
    wazuh_log_monitor.start(timeout=5,
                            callback=log_callback,
                            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)

    log_callback = gc.callback_error_in_configuration(
        'ERROR', prefix, conf_path=f'{wazuh_configuration}')
    wazuh_log_monitor.start(timeout=5,
                            callback=log_callback,
                            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)

    if sys.platform != 'win32':
        log_callback = gc.callback_error_in_configuration(
            'CRITICAL', prefix, conf_path=f'{wazuh_configuration}')
        wazuh_log_monitor.start(
            timeout=5,
            callback=log_callback,
            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)
def check_log_format_invalid(cfg):
    """Check if Wazuh fails due to an invalid log_format configuration value.

    Args:
        cfg (dict): Dictionary with the localfile configuration.

    Raises:
        TimeoutError: If the error callbacks are not generated.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    if cfg['valid_value']:
        pytest.skip('Valid values provided')

    log_callback = gc.callback_invalid_value('log_format', cfg['log_format'],
                                             prefix)
    wazuh_log_monitor.start(timeout=5,
                            callback=log_callback,
                            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)

    log_callback = gc.callback_error_in_configuration(
        'ERROR', prefix, conf_path=f'{wazuh_configuration}')
    wazuh_log_monitor.start(timeout=5,
                            callback=log_callback,
                            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)

    if sys.platform != 'win32':
        log_callback = gc.callback_error_in_configuration(
            'CRITICAL', prefix, conf_path=f'{wazuh_configuration}')
        wazuh_log_monitor.start(
            timeout=5,
            callback=log_callback,
            error_message=gc.GENERIC_CALLBACK_ERROR_MESSAGE)
Example no. 17
def check_time_travel(time_travel: bool,
                      interval: timedelta = timedelta(hours=13),
                      monitor: FileMonitor = None):
    """
    Change date and time of the system depending on a boolean condition. Optionally, a monitor may be used to check
    if a scheduled scan has been performed.

    This function is especially useful to deal with scheduled scans that are triggered on a time interval basis.

    Parameters
    ----------
    time_travel : boolean
        True if we need to update time. False otherwise.
    interval : timedelta, optional
        Time interval that will be added to system clock. Default: 13 hours.
    monitor : FileMonitor, optional
        If passed, after changing system clock it will check for the end of the scheduled scan. The `monitor` will not
        consume any log line. Default `None`.

    Raises
    ------
    TimeoutError
        If `monitor` is not `None` and the scan has not ended in the default timeout specified in `global_parameters`.
    """
    if time_travel:
        before = str(datetime.now())
        TimeMachine.travel_to_future(interval)
        logger.info(
            f"Changing the system clock from {before} to {str(datetime.now())}"
        )

        if monitor:
            monitor.start(
                timeout=global_parameters.default_timeout,
                callback=callback_detect_end_scan,
                update_position=False,
                error_message=f'End of scheduled scan not detected after '
                f'{global_parameters.default_timeout} seconds')
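# Minimal usage sketch of check_time_travel (the 13-hour interval is just the default shown
# above): jump the system clock forward and wait for the scheduled scan to finish without
# consuming the matched log line.
check_time_travel(time_travel=True,
                  interval=timedelta(hours=13),
                  monitor=FileMonitor(LOG_FILE_PATH))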
Example no. 18
def check_configuration_reconnect_time_invalid(cfg):
    """Check if Wazuh fails due to a invalid reconnect time attribute configuration value.

    Args:
        cfg (dict): Dictionary with the localfile configuration.

    Raises:
        TimeoutError: If the error callbacks are not generated.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    if cfg['reconnect_time'] in problematic_values:
        pytest.xfail(
            "Logcolector accepts invalid values. Issue: https://github.com/wazuh/wazuh/issues/8158"
        )

    log_callback = logcollector.callback_invalid_reconnection_time(
        prefix=prefix)
    wazuh_log_monitor.start(
        timeout=5,
        callback=log_callback,
        error_message=
        "The expected invalid reconnection time error has not been produced")
def test_configuration_age_datetime(new_datetime, get_files_list,
                                    get_configuration,
                                    create_file_structure_function,
                                    configure_environment):
    """Check if logcollector age option works correctly when date time of the system changes.

    Ensure that when date of the system change logcollector use properly age value, ignoring files that have not been
    modified for a time greater than age value using current date.

    Raises:
        TimeoutError: If the expected callbacks are not generated.
    """
    cfg = get_configuration['metadata']
    age_seconds = time_to_seconds(cfg['age'])

    control_service('stop', daemon=DAEMON_NAME)
    truncate_file(LOG_FILE_PATH)
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('start', daemon=DAEMON_NAME)

    TimeMachine.travel_to_future(time_to_timedelta(new_datetime))

    for file in file_structure:
        for name in file['filename']:
            absolute_file_path = os.path.join(file['folder_path'], name)

            log_callback = logcollector.callback_match_pattern_file(
                cfg['location'], absolute_file_path)
            wazuh_log_monitor.start(timeout=5,
                                    callback=log_callback,
                                    error_message=f"{name} was not detected")

            fileinfo = os.stat(absolute_file_path)
            current_time = time.time()
            mfile_time = current_time - fileinfo.st_mtime

            if age_seconds <= int(mfile_time):
                log_callback = logcollector.callback_ignoring_file(
                    absolute_file_path)
                wazuh_log_monitor.start(
                    timeout=5,
                    callback=log_callback,
                    error_message=f"{name} was not ignored")
            else:
                with pytest.raises(TimeoutError):
                    log_callback = logcollector.callback_ignoring_file(
                        absolute_file_path)
                    wazuh_log_monitor.start(
                        timeout=5,
                        callback=log_callback,
                        error_message=f"{name} was not ignored")

        TimeMachine.time_rollback()
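# Hypothetical sketch of a helper like time_to_seconds (an assumption for illustration:
# age values use the s/m/h/d suffixes, e.g. '30s', '5m', '2h', '1d').
def time_to_seconds(value):
    units = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    return int(value[:-1]) * units[value[-1]]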
Example no. 20
def test_audit_key(audit_key, path, get_configuration, configure_environment, restart_syscheckd):
    """Check `<audit_key>` functionality by adding a audit rule and checking if alerts with that key are triggered when
    a file is created.

    Args:
        audit_key (str): Name of the audit_key to monitor.
        tags_to_apply (set): Run test if matches with a configuration identifier, skip otherwise.
        get_configuration (fixture): Gets the current configuration of the test.
        configure_environment (fixture): Configure the environment for the execution of the test.
        restart_syscheckd (fixture): Restarts syscheck.
        wait_for_fim_start (fixture): Waits until the first FIM scan is completed.

    Raises:
        TimeoutError: If an expected event couldn't be captured.
        ValueError: If the path of the event is wrong.
    """

    logger.info('Applying the test configuration')
    check_apply_test({audit_key}, get_configuration['tags'])

    # Add watch rule
    add_rule_command = "auditctl -w " + path + " -p wa -k " + audit_key
    os.system(add_rule_command)

    # Restart and wait for Wazuh
    control_service('stop')
    truncate_file(fim.LOG_FILE_PATH)
    wazuh_log_monitor = FileMonitor(fim.LOG_FILE_PATH)
    control_service('start')
    fim.detect_initial_scan(wazuh_log_monitor)

    # Look for audit_key word
    fim.create_file(fim.REGULAR, path, "testfile")
    events = wazuh_log_monitor.start(timeout=30,
                                     callback=fim.callback_audit_key,
                                     accum_results=1,
                                     error_message=f'Did not receive expected "Match audit_key ..." event '
                                                   f'with the command {" ".join(add_rule_command)}').result()
    assert audit_key in events

    # Remove watch rule
    os.system("auditctl -W " + path + " -p wa -k " + audit_key)
Example no. 21
def configure_syscheck_environment(time_sleep):
    # Create every needed directory
    for n in range(n_directories):
        t_dir = os.path.join(PREFIX, f'{testdir}{n}')
        os.makedirs(t_dir, exist_ok=True, mode=0o777)
        directories_list.append(t_dir)

    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('restart')
    logger.debug('Waiting 15 seconds for syscheckd to start.')
    time.sleep(15)

    file = 'regular'

    logger.debug(
        f'Waiting {str(time_sleep)} seconds. Execute `generate_windows_yaml.py` now.'
    )
    time.sleep(time_sleep)

    logger.debug('Creating files...')
    for directory in directories_list:
        create_file(REGULAR, directory, file, content='')
        time.sleep(0.01)
    try:
        while True:
            wazuh_log_monitor.start(timeout=5, callback=callback_detect_event)
    except TimeoutError:
        pass

    logger.debug('Modifying files...')
    for directory in directories_list:
        modify_file(directory, file, new_content='Modified')
        time.sleep(0.01)
    try:
        while True:
            wazuh_log_monitor.start(timeout=5, callback=callback_detect_event)
    except TimeoutError:
        pass

    logger.debug('Deleting files...')
    for directory in directories_list:
        delete_file(directory, file)
        time.sleep(0.01)
    try:
        while True:
            wazuh_log_monitor.start(timeout=5, callback=callback_detect_event)
    except TimeoutError:
        pass
Example no. 22
def test_audit_key(audit_key, path, get_configuration, configure_environment,
                   restart_syscheckd):
    """Check <audit_key> functionality by adding a audit rule and checking if alerts with that key are triggered when
    a file is created.

    Parameters
    ----------
    audit_key : str
        Name of the audit_key to monitor
    path : str
        Path of the folder to be monitored
    """
    logger.info('Applying the test configuration')
    check_apply_test({audit_key}, get_configuration['tags'])

    # Add watch rule
    add_rule_command = "auditctl -w " + path + " -p wa -k " + audit_key
    os.system(add_rule_command)

    # Restart and wait for Wazuh
    control_service('stop')
    truncate_file(LOG_FILE_PATH)
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('start')
    detect_initial_scan(wazuh_log_monitor)

    # Look for audit_key word
    create_file(REGULAR, path, "testfile")
    events = wazuh_log_monitor.start(
        timeout=30,
        callback=callback_audit_key,
        accum_results=1,
        error_message=f'Did not receive expected "Match audit_key ..." event '
        f'with the command {" ".join(add_rule_command)}').result()
    assert audit_key in events

    # Remove watch rule
    os.system("auditctl -W " + path + " -p wa -k " + audit_key)
def check_log_format_valid(cfg):
    """Check if Wazuh run correctly with the specified log formats.

    Ensure logcollector allows the specified log formats. Also, in the case of the manager instance, check if the API
    answer for localfile block coincides.

    Raises:
        TimeoutError: If the "Analyzing file" callback is not generated.
        AssertError: In the case of a server instance, the API response is different that the real configuration.
    """
    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

    if cfg['log_format'] not in log_format_not_print_analyzing_info:
        log_callback = logcollector.callback_analyzing_file(cfg['location'],
                                                            prefix=prefix)
        wazuh_log_monitor.start(
            timeout=5,
            callback=log_callback,
            error_message=logcollector.GENERIC_CALLBACK_ERROR_ANALYZING_FILE)
    elif 'command' in cfg['log_format']:
        log_callback = logcollector.callback_monitoring_command(
            cfg['log_format'], cfg['command'], prefix=prefix)
        wazuh_log_monitor.start(timeout=5,
                                callback=log_callback,
                                error_message=logcollector.
                                GENERIC_CALLBACK_ERROR_COMMAND_MONITORING)
    elif cfg['log_format'] == 'djb-multilog':
        log_callback = logcollector.callback_monitoring_djb_multilog(
            cfg['location'], prefix=prefix)
        wazuh_log_monitor.start(
            timeout=5,
            callback=log_callback,
            error_message="The expected multilog djb log has not been produced"
        )

    if wazuh_component == 'wazuh-manager':
        real_configuration = cfg.copy()
        real_configuration.pop('valid_value')
        api.wait_until_api_ready()
        api.compare_config_api_response([real_configuration], 'localfile')
def test_agentd_reconection_enrollment_no_keys(configure_authd_server,
                                               configure_environment,
                                               get_configuration):
    """Check how the agent behaves when losing communication with remoted and a new enrollment is sent to authd.

    In this case, the agent has its client.keys file empty.

    Args:
        configure_authd_server (fixture): Initialize a simulated authd connection.
        start_authd (fixture): Enable authd to accept connections and perform enrollments.
        set_authd_id (fixture): Set agent id to 101 in the authd simulated connection.
        clean_keys (fixture): Clear the agent's client.keys file.
        configure_environment (fixture): Configure a custom environment for testing.
        get_configuration (fixture): Get configurations from the module.
    """
    global remoted_server

    remoted_server = RemotedSimulator(
        protocol=get_configuration['metadata']['PROTOCOL'],
        mode='CONTROLLED_ACK',
        client_keys=CLIENT_KEYS_PATH)

    # Stop target Agent
    control_service('stop')
    # Clean logs
    truncate_file(LOG_FILE_PATH)
    # Prepare test
    start_authd()
    set_authd_id()
    clean_keys()
    # Start target Agent
    control_service('start')

    # start hearing logs
    log_monitor = FileMonitor(LOG_FILE_PATH)

    # hearing on enrollment server
    authd_server.clear()

    # Wait until Agent asks keys for the first time
    log_monitor.start(
        timeout=120,
        callback=wait_enrollment,
        error_message="Agent never enrolled for the first time!")

    # Wait until Agent is notifying Manager
    log_monitor.start(
        timeout=120,
        callback=wait_notify,
        error_message="Notify message from agent was never sent!")
    assert "aes" in remoted_server.last_message_ctx, "Incorrect Secure Message"

    # Start rejecting Agent
    remoted_server.set_mode('REJECT')
    # hearing on enrollment server
    authd_server.clear()
    # Wait until Agent asks a new key to enrollment
    log_monitor.start(
        timeout=180,
        callback=wait_enrollment,
        error_message="Agent never enrolled after rejecting connection!")

    # Start responding to Agent
    remoted_server.set_mode('CONTROLLED_ACK')
    # Wait until Agent is notifying Manager
    log_monitor.start(
        timeout=120,
        callback=wait_notify,
        error_message="Notify message from agent was never sent!")
    assert "aes" in remoted_server.last_message_ctx, "Incorrect Secure Message"
# reset_client_keys
clean_client_keys_file()

# Start Wazuh
control_service('start')
"""Wait until agentd has begun"""


def callback_agentd_startup(line):
    if 'Accepting connections on port 1515' in line:
        return line
    return None


log_monitor = FileMonitor(LOG_FILE_PATH)
log_monitor.start(timeout=30, callback=callback_agentd_startup)


# @pytest.mark.parametrize('test_case', [case['test_case'] for case in ssl_configuration_tests])
def test_ossec_auth_name_ip_pass(get_configuration, configure_environment,
                                 configure_sockets_environment):
    """Check that every input message in authd port generates the adequate output

    Parameters
    ----------
    test_case : list
        List of test_cases, each one a dict with the following keys:
            - input: message that will be sent to the manager
            - output: expected response
            - insert_prev_agent: yes or no (for duplicated ip or name cases)
                1) if insert_prev_agent_custom is present: the previous input message is overwritten by the custom message
    """
Example no. 26
def test_wpk_manager(set_debug_mode, get_configuration, configure_environment,
                     restart_service, configure_agents):
    metadata = get_configuration.get('metadata')
    protocol = metadata['protocol']
    expected_status = metadata['status']
    sender = Sender(SERVER_ADDRESS, protocol=protocol)
    log_monitor = FileMonitor(LOG_FILE_PATH)
    expected_error_msg = metadata.get('error_msg')
    sha_list = metadata.get('sha_list')
    injectors = []
    file_name = ''
    installer = ''

    if 'VALIDSHA1' in sha_list:
        sha_list = get_sha_list(metadata)

    command = 'upgrade'
    if metadata.get('command') == 'upgrade_custom':
        command = 'upgrade_custom'
        if not expected_error_msg or ('The WPK file does not exist' not in expected_error_msg):
            file_name = metadata.get('message_params').get('file_path')
            file = os.path.join(UPGRADE_PATH, file_name)
            create_wpk_custom_file(file)
            metadata['message_params']['file_path'] = file
            sha_list = [hashlib.sha1(open(file, 'rb').read()).hexdigest()]
        if metadata.get('message_params').get('installer'):
            installer = metadata.get('message_params').get('installer')
        else:
            installer = 'upgrade.sh'

    for index, agent in enumerate(agents):
        agent.set_wpk_variables(sha_list[index],
                                metadata['upgrade_exec_result'][index],
                                metadata['upgrade_notification'][index],
                                metadata['upgrade_script_result'][index],
                                stage_disconnect=metadata['stage_disconnect'][index])
        injector = Injector(sender, agent)
        injectors.append(injector)
        injector.run()
        if protocol == "tcp":
            sender = Sender(manager_address=SERVER_ADDRESS, protocol=protocol)

    agents_id = [int(x.id) for x in agents]

    data = {
        'command': command,
        'parameters': {'agents': agents_id}
    }

    # If the test case has message params, add them to the data to send
    if metadata.get('message_params'):
        data['parameters'].update(metadata.get('message_params'))

    # Remove the WPK package if the http or version checks are needed
    if metadata.get('checks') and ('use_http' in metadata.get('checks') or 'version' in metadata.get('checks')):
        remove_wpk_package()

    # Give time for registration key to be available and send a few heartbeats
    time.sleep(40)

    # Send upgrade request
    response = send_message(data, UPGRADE_SOCKET)

    if metadata.get('checks') and (('use_http' in metadata.get('checks')) or ('version' in metadata.get('checks'))):
        # Checking version or http in logs
        try:
            log_monitor.start(timeout=60, callback=wait_download)
        except TimeoutError as err:
            raise AssertionError("Download wpk log took too much!")

        last_log = log_monitor.result()
        if 'use_http' in metadata.get('checks'):
            if metadata.get('message_params') and \
                    metadata.get('message_params').get('use_http'):
                assert "'http://" in last_log, "Use http protocol did not match expected! Expected 'http://'"
            else:
                assert "'https://" in last_log, "Use http protocol did not match expected! Expected 'https://'"

        if 'version' in metadata.get('checks'):
            if metadata.get('message_params') and \
                    metadata.get('message_params').get('version'):
                assert metadata.get('message_params').get('version') in \
                    last_log, f'Versions did not match expected! \
                                Expected {metadata.get("message_params").get("version")}'
            else:
                assert MANAGER_VERSION in last_log, \
                    f'Versions did not match expected! Expected {MANAGER_VERSION}'
        # let time to download wpk
        try:
            log_monitor.start(timeout=600, callback=wait_downloaded)
        except TimeoutError as err:
            raise AssertionError("Finish download wpk log took too much!")
        # time.sleep(60)

    if metadata.get('checks') and ('chunk_size' in metadata.get('checks')):
        # Checking version in logs
        try:
            log_monitor.start(timeout=60, callback=wait_chunk_size)
        except TimeoutError as err:
            raise AssertionError("Chunk size log tooks too much!")
        chunk = metadata.get('chunk_size')
        last_log = log_monitor.result()
        assert f'com write {chunk}' in last_log, \
            f'Chunk size did not match expected! Expected {chunk} obtained {last_log}'

    if metadata.get('checks') and ('wpk_name' in metadata.get('checks')):
        # Checking version in logs
        try:
            log_monitor.start(timeout=180, callback=wait_wpk_custom)
        except TimeoutError as err:
            raise AssertionError("Custom wpk log tooks too much!")

        last_log = log_monitor.result()
        assert f'com upgrade {file_name} {installer}' in last_log, \
            f'Wpk custom package did not match expected! ' \
            f'Expected {metadata.get("message_params").get("file_path")} obtained {last_log}'

    if metadata.get('first_attempt'):
        # Check that the result of the first attempt is Success
        assert 'Success' == response['data'][0]['message'], \
            f'First upgrade response did not match expected! ' \
            f'Expected {metadata.get("expected_response")} obtained {response["data"][0]["message"]}'

        repeat_message = data
        # Continue with the validations of first attempt
        task_ids = [item.get('agent') for item in response['data']]
        for index, agent_id in enumerate(task_ids):
            data = {
                "origin": {
                    "module": "api"
                },
                "command": 'upgrade_result',
                "parameters": {
                    "agents": [agent_id]
                }
            }
            time.sleep(30)
            response = send_message(data, TASK_SOCKET)
            retries = 0
            while (response['data'][0]['status'] != metadata.get('first_attempt')) \
                    and (retries < 10):
                time.sleep(30)
                response = send_message(data, TASK_SOCKET)
                retries += 1
            assert metadata.get('first_attempt') == response['data'][0]['status'], \
                f'First upgrade status did not match expected! ' \
                f'Expected {metadata.get("first_attempt")} obtained {response["data"][0]["status"]}'

        # send upgrade request again
        response = send_message(repeat_message, UPGRADE_SOCKET)

    if metadata.get('expected_response') == 'Success':
        # Check that the result is the expected one
        assert metadata.get('expected_response') == response['data'][0]['message'], \
            f'Upgrade response did not match expected! ' \
            f'Expected {metadata.get("expected_response")} obtained {response["data"][0]["message"]}'

        # Continue with the test validations
        task_ids = [item.get('agent') for item in response['data']]
        for index, agent_id in enumerate(task_ids):
            data = {
                "origin": {
                    "module": "api"
                },
                "command": 'upgrade_result',
                "parameters": {
                    "agents": [agent_id]
                }
            }
            time.sleep(30)
            response = send_message(data, TASK_SOCKET)
            retries = 0
            while response['data'][0]['status'] == 'Updating' and retries < 30 and \
                    response['data'][0]['status'] != expected_status[index]:
                time.sleep(30)
                response = send_message(data, TASK_SOCKET)
                retries += 1
            assert expected_status[index] == response['data'][0]['status'], \
                f'Upgrade status did not match expected! ' \
                f'Expected {expected_status[index]} obtained {response["data"][0]["status"]} at index {index}'
            if expected_status[index] == 'Error':
                assert expected_error_msg[index] == response['data'][0]['error_msg'], \
                    f'Error msg did not match expected! ' \
                    f'Expected {expected_error_msg[index]} obtained {response["data"][0]["error_msg"]} at index {index}'
    else:
        assert metadata.get('expected_response') == response['data'][0]['message'], \
            f'Upgrade response did not match expected! ' \
            f'Expected {metadata.get("expected_response")} obtained {response["data"][0]["message"]}'

    for injector in injectors:
        injector.stop_receive()

    time.sleep(3)  # Wait for agents threads to stop
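# Sketch of an equivalent, file-handle-safe way to compute the WPK SHA1 used above (same
# result as hashlib.sha1(open(file, 'rb').read()).hexdigest(), but the file is closed
# deterministically and read in chunks).
import hashlib

def sha1_of_file(path):
    sha1 = hashlib.sha1()
    with open(path, 'rb') as wpk_file:
        for chunk in iter(lambda: wpk_file.read(8192), b''):
            sha1.update(chunk)
    return sha1.hexdigest()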
def test_agentd_initial_enrollment_retries(configure_authd_server,
                                           configure_environment,
                                           get_configuration):
    """Check how the agent behaves when it makes multiple enrollment attempts before getting its key.

    For this, the agent starts without keys and performs multiple enrollment requests
    to authd before getting the new key to communicate with remoted.

    Args:
        configure_authd_server (fixture): Initialize a simulated authd connection.
        stop_authd (fixture): Disable authd to accept connections and perform enrollments.
        set_authd_id (fixture): Set agent id to 101 in the authd simulated connection.
        clean_keys (fixture): Clear the agent's client.keys file.
        configure_environment (fixture): Configure a custom environment for testing.
        get_configuration (fixture): Get configurations from the module.
    """
    global remoted_server

    remoted_server = RemotedSimulator(
        protocol=get_configuration['metadata']['PROTOCOL'],
        mode='CONTROLLED_ACK',
        client_keys=CLIENT_KEYS_PATH)

    # Stop target Agent
    control_service('stop')
    # Clean logs
    truncate_file(LOG_FILE_PATH)
    # Prepare test
    stop_authd()
    set_authd_id()
    clean_keys()
    # Start whole Agent service to check other daemons status after initialization
    control_service('start')

    # Start hearing logs
    log_monitor = FileMonitor(LOG_FILE_PATH)

    start_time = datetime.now()
    # Check for unsuccessful enrollment retries in Agentd initialization
    retries = 0
    while retries < 4:
        retries += 1
        log_monitor.start(timeout=retries * 5 + 20,
                          callback=wait_enrollment_try,
                          error_message="Enrollment retry was not sent!")
    stop_time = datetime.now()
    expected_time = start_time + timedelta(seconds=retries * 5 - 2)
    # Check if delay was applied
    assert stop_time > expected_time, "Retries too quick"

    # Enable authd
    authd_server.clear()
    authd_server.set_mode("ACCEPT")
    # Wait for successful enrollment
    log_monitor.start(timeout=70,
                      callback=wait_enrollment,
                      error_message="No successful enrollment after retries!")

    # Wait until Agent is notifying Manager
    log_monitor.start(
        timeout=120,
        callback=wait_notify,
        error_message="Notify message from agent was never sent!")

    # Check if no Wazuh module stopped due to Agentd Initialization
    with open(LOG_FILE_PATH) as log_file:
        log_lines = log_file.read().splitlines()
        for line in log_lines:
            if "Unable to access queue:" in line:
                raise AssertionError(
                    "A Wazuh module stopped because of Agentd initialization!")
Example no. 28
def test_configuration_age_basic(get_local_internal_options,
                                 configure_local_internal_options,
                                 get_files_list,
                                 create_file_structure_function,
                                 get_configuration, configure_environment,
                                 restart_logcollector):
    """Check if logcollector works correctly and uses the specified age value.

    Check that those files that have not been modified for a time greater than age value, are ignored for logcollector.
    Otherwise, files should not be ignored. Also, it checks logcollector detect modification time changes in monitored
    files and catch new logs from ignored and not ignored files.

    Raises:
        TimeoutError: If the expected callbacks are not generated.
    """

    cfg = get_configuration['metadata']
    age_seconds = time_to_seconds(cfg['age'])

    for file in file_structure:
        for name in file['filename']:
            absolute_file_path = os.path.join(file['folder_path'], name)
            wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)

            log_callback = logcollector.callback_match_pattern_file(
                cfg['location'], absolute_file_path)
            wazuh_log_monitor.start(timeout=5,
                                    callback=log_callback,
                                    error_message=f"{name} was not detected")

            if int(age_seconds) <= int(file['age']):
                log_callback = logcollector.callback_ignoring_file(
                    absolute_file_path)
                wazuh_log_monitor.start(
                    timeout=5,
                    callback=log_callback,
                    error_message=f"{name} was not ignored")

            else:
                with pytest.raises(TimeoutError):
                    log_callback = logcollector.callback_ignoring_file(
                        absolute_file_path)
                    wazuh_log_monitor.start(
                        timeout=5,
                        callback=log_callback,
                        error_message=f"{name} was not ignored")

    for file in file_structure:
        for name in file['filename']:
            absolute_file_path = os.path.join(file['folder_path'], name)
            with open(absolute_file_path, 'a') as file_to_write:
                file_to_write.write(file['content'])

            log_callback = logcollector.callback_reading_syslog_message(
                file['content'][:-1])
            wazuh_log_monitor.start(
                timeout=10,
                callback=log_callback,
                error_message=f"No syslog message received from {name}")

            log_callback = logcollector.callback_read_line_from_file(
                1, absolute_file_path)
            wazuh_log_monitor.start(timeout=10,
                                    callback=log_callback,
                                    error_message=f"No lines read from {name}")
def test_agentd_parametrized_reconnections(configure_authd_server, start_authd,
                                           stop_agent, set_keys,
                                           configure_environment,
                                           get_configuration):
    """Check how the agent behaves when there are delays between connection attempts to the server.

    For this purpose, different values for max_retries and retry_interval parameters are tested.

    Args:
        configure_authd_server (fixture): Initialize a simulated authd connection.
        start_authd (fixture): Enable authd to accept connections and perform enrollments.
        stop_agent (fixture): Stop Wazuh's agent.
        set_keys (fixture): Write to client.keys file the agent's enrollment details.
        configure_environment (fixture): Configure a custom environment for testing.
        get_configuration (fixture): Get configurations from the module.
    """
    DELTA = 1
    RECV_TIMEOUT = 5
    ENROLLMENT_SLEEP = 20
    LOG_TIMEOUT = 30

    global remoted_server

    PROTOCOL = protocol = get_configuration['metadata']['PROTOCOL']
    RETRIES = get_configuration['metadata']['MAX_RETRIES']
    INTERVAL = get_configuration['metadata']['RETRY_INTERVAL']
    ENROLL = get_configuration['metadata']['ENROLL']

    control_service('stop')
    clean_logs()
    log_monitor = FileMonitor(LOG_FILE_PATH)
    remoted_server = RemotedSimulator(protocol=PROTOCOL,
                                      client_keys=CLIENT_KEYS_PATH)
    control_service('start')

    # 2 Check for unsuccessful connection retries in Agentd initialization
    interval = INTERVAL
    if PROTOCOL == 'udp':
        interval += RECV_TIMEOUT

    if ENROLL == 'yes':
        total_retries = RETRIES + 1
    else:
        total_retries = RETRIES

    for retry in range(total_retries):
        # 3 If auto enrollment is enabled, retry check enrollment and retries after that
        if ENROLL == 'yes' and retry == total_retries - 1:
            # Wait successfully enrollment
            try:
                log_monitor.start(timeout=20, callback=wait_enrollment)
            except TimeoutError as err:
                raise AssertionError("No successful enrollment after retries!")
            last_log = parse_time_from_log_line(log_monitor.result())

            # Next retry will be after enrollment sleep
            interval = ENROLLMENT_SLEEP

        try:
            log_monitor.start(timeout=interval + LOG_TIMEOUT,
                              callback=wait_connect)
        except TimeoutError as err:
            raise AssertionError("Connection attempts took too much!")
        actual_retry = parse_time_from_log_line(log_monitor.result())
        if retry > 0:
            delta_retry = actual_retry - last_log
            # Check if delay was applied
            assert delta_retry >= timedelta(seconds=interval -
                                            DELTA), "Retries too quick"
            assert delta_retry <= timedelta(seconds=interval +
                                            DELTA), "Retries too slow"
        last_log = actual_retry

    # 4 Wait for server rollback
    try:
        log_monitor.start(timeout=30, callback=wait_server_rollback)
    except TimeoutError as err:
        raise AssertionError("Server rollback took too much!")

    # 5 Check amount of retries and enrollment
    (connect, enroll) = count_retry_mesages()
    assert connect == total_retries
    if ENROLL == 'yes':
        assert enroll == 1
    else:
        assert enroll == 0

    return
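# Hypothetical sketch of a helper like parse_time_from_log_line (an assumption: ossec.log
# lines start with a "YYYY/MM/DD HH:MM:SS" timestamp, consistent with the split()[1] time
# parsing used in check_time_to_connect above).
from datetime import datetime

def parse_time_from_log_line(log_line):
    date_part, time_part = log_line.split()[0], log_line.split()[1]
    return datetime.strptime(f'{date_part} {time_part}', '%Y/%m/%d %H:%M:%S')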
Example no. 30
def recursion_test(dirname,
                   subdirname,
                   recursion_level,
                   timeout=1,
                   edge_limit=2,
                   ignored_levels=1,
                   is_scheduled=False):
    """
    Check that events are generated in the first and last `edge_limit` directory levels in the hierarchy
    dirname/subdirname1/.../subdirname{recursion_level}. It also checks that no events are generated for
    subdirname{recursion_level+ignored_levels}. All directories and subdirectories needed will be created using the info
    provided by the parameters.

    Example:
        recursion_level = 10
        edge_limit = 2
        ignored_levels = 2

        dirname = "/testdir"
        subdirname = "subdir"

        With those parameters this function will create files and expect to detect 'added', 'modified' and 'deleted'
        events for the following directories only, as they are the first and last 2 subdirectories within recursion
        level 10:

        /testdir/subdir1
        /testdir/subdir1/subdir2
        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/
        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/subdir10

        As ignored_levels value is 2, this function will also create files on the following directories and ensure that
        no events are raised as they are outside the recursion level specified:

        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/subdir10/subdir11
        /testdir/subdir1/subdir2/subdir3/subdir4/subdir5/subdir6/subdir7/subdir8/subdir9/subdir10/subdir11/subdir12

    This function also takes into account that a very long path will raise a FileNotFoundError exception on Windows
    because of its path length limitations. In a similar way, on Linux environments an `Event Too Long` error will be
    raised if the path name is too long.

    Parameters
    ----------
    dirname : str
        The path being monitored by syscheck (indicated in the .conf file).
    subdirname : str
        The name of the subdirectories that will be created during the execution for testing purposes.
    recursion_level : int
        Recursion level. Also used as the number of subdirectories to be created and checked for the current test.
    timeout : int
        Max time to wait until an event is raised.
    edge_limit : int
        Number of directories where the test will monitor events.
    ignored_levels : int
        Number of directories exceeding the specified recursion_level to verify events are not raised.
    is_scheduled : bool
        If True the internal date will be modified to trigger scheduled checks by syschecks.
        False if realtime or Whodata.
    """
    path = dirname
    try:
        # Check True (Within the specified recursion level)
        for n in range(recursion_level):
            path = os.path.join(path, subdirname + str(n + 1))
            if ((recursion_level < edge_limit * 2)
                    or (recursion_level >= edge_limit * 2 and n < edge_limit)
                    or (recursion_level >= edge_limit * 2
                        and n > recursion_level - edge_limit)):
                regular_file_cud(path,
                                 wazuh_log_monitor,
                                 time_travel=is_scheduled,
                                 min_timeout=timeout)

        # Check False (exceeding the specified recursion_level)
        for n in range(recursion_level, recursion_level + ignored_levels):
            path = os.path.join(path, subdirname + str(n + 1))
            regular_file_cud(path,
                             wazuh_log_monitor,
                             time_travel=is_scheduled,
                             min_timeout=timeout,
                             triggers_event=False)

    except TimeoutError:
        timeout_log_monitor = FileMonitor(LOG_FILE_PATH)
        if timeout_log_monitor.start(
                timeout=5, callback=callback_audit_event_too_long).result():
            pytest.fail("Audit raised 'Event Too Long' message.")
        raise

    except FileNotFoundError as ex:
        MAX_PATH_LENGTH_WINDOWS_ERROR = 206
        if ex.winerror != MAX_PATH_LENGTH_WINDOWS_ERROR:
            raise

    except OSError as ex:
        MAX_PATH_LENGTH_MACOS_ERROR = 63
        MAX_PATH_LENGTH_SOLARIS_ERROR = 78
        if ex.errno not in (MAX_PATH_LENGTH_SOLARIS_ERROR,
                            MAX_PATH_LENGTH_MACOS_ERROR):
            raise
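# Usage sketch, reusing the values from the docstring example above: check the first and
# last 2 levels of /testdir/subdir1/.../subdir10 and verify that levels 11 and 12 raise no
# events (is_scheduled=True makes the check rely on the simulated clock jump).
recursion_test('/testdir', 'subdir', recursion_level=10,
               edge_limit=2, ignored_levels=2, is_scheduled=True)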