Example #1
0
    def run(self):
        """This method creates and destroy the needed processes for the messages founded in messages_path.
        It creates one file composer (process) for every file to be monitored in every host."""
        for host, payload in self.test_cases.items():
            self._monitored_files.update({case['path'] for case in payload})
            if not self._monitored_files:
                raise AttributeError('There is no path to monitor. Exiting...')
            for path in self._monitored_files:
                output_path = f'{host}_{path.split("/")[-1]}.tmp'
                self._file_content_collectors.append(self.file_composer(host=host, path=path, output_path=output_path))
                logger.debug(f'Added new file composer process for {host} and path: {path}')
                self._file_monitors.append(self._start(host=host, payload=payload, path=output_path))
                logger.debug(f'Added new file monitor process for {host} and path: {path}')

        while True:
            if not any(handler.is_alive() for handler in self._file_monitors):
                for handler in self._file_monitors:
                    handler.join()
                for file_collector in self._file_content_collectors:
                    file_collector.terminate()
                    file_collector.join()
                self.clean_tmp_files()
                break
            time.sleep(self._time_step)
        self.check_result()
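
The loop above supervises two kinds of processes: monitors that exit on their own once their message is found (or times out), and collectors that tail files forever and must be terminated. A minimal, self-contained sketch of that supervision pattern, with illustrative worker names (standard library only):

import time
from multiprocessing import Process

def monitor(seconds):
    time.sleep(seconds)  # Stands in for waiting until an expected message is found.

def collector():
    while True:  # Stands in for an endless tail -f style loop.
        time.sleep(0.1)

if __name__ == '__main__':
    monitors = [Process(target=monitor, args=(s,)) for s in (1, 2)]
    collectors = [Process(target=collector)]
    for proc in monitors + collectors:
        proc.start()
    # Poll until every monitor has exited on its own.
    while any(proc.is_alive() for proc in monitors):
        time.sleep(0.5)
    for proc in monitors:
        proc.join()
    # Collectors never exit by themselves, so terminate and then join them.
    for proc in collectors:
        proc.terminate()
        proc.join()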
Example #2
0
    def file_composer(self, host, path, output_path):
        """Collects the file content of the specified path in the desired host and append it to the output_path file.
        Simulates the behavior of tail -f and redirect the output to output_path.

        Parameters
        ----------
        host : str
            Hostname.
        path : str
            Host file path to be collected.
        output_path : str
            Output path of the content collected from the remote host path.
        """
        tmp_file = os.path.join(self._tmp_path, output_path)
        try:
            truncate_file(tmp_file)
        except FileNotFoundError:
            pass
        logger.debug(f'Starting file composer for {host} and path: {path}. '
                     f'Composite file in {tmp_file}')
        while True:
            with FileLock(tmp_file):
                with open(tmp_file, "r+") as file:
                    content = self.host_manager.get_file_content(host, path).split('\n')
                    file_content = file.read().split('\n')
                    for new_line in content:
                        if new_line == '':
                            continue
                        if new_line not in file_content:
                            file.write(f'{new_line}\n')
                time.sleep(self._time_step)
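
file_composer holds a file lock while merging the remote content into the composite file so the monitor never reads a half-written line. A condensed sketch of the same lock-then-deduplicate idea, assuming the filelock package provides the FileLock used above (the helper name is illustrative); unlike the loop above, it also remembers lines written in the same pass:

from filelock import FileLock  # pip install filelock

def append_new_lines(remote_content, output_file):
    """Append only the lines of remote_content that output_file does not contain yet."""
    with FileLock(output_file + '.lock'):
        with open(output_file, 'a+') as composite:
            composite.seek(0)
            existing = set(composite.read().split('\n'))
            for line in remote_content.split('\n'):
                if line and line not in existing:
                    composite.write(f'{line}\n')
                    existing.add(line)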
Example #3
0
def test_command_execution_freq(get_local_internal_options,
                                configure_local_internal_options,
                                get_configuration, configure_environment,
                                restart_logcollector):
    """Check if the Wazuh run correctly with the specified command monitoring option "frequency".

    For this purpose, it is verified that the command has not been executed
    before the period established in this option.

    Args:
        get_local_internal_options (fixture): Get internal configuration.
        configure_local_internal_options (fixture): Set internal configuration.
        get_configuration (fixture): Get configurations from the module.
        configure_environment (fixture): Configure a custom environment for testing.
        restart_logcollector (fixture): Reset log file and start a new monitor.

    Raises:
        TimeoutError: If the command monitoring callback is not generated.
    """
    config = get_configuration['metadata']
    log_callback = logcollector.callback_running_command(
        log_format=config['log_format'],
        command=config['command'],
        prefix=LOG_COLLECTOR_DETECTOR_PREFIX)
    seconds_to_travel = config['frequency'] / 2  # Middle of the command execution cycle.

    wazuh_log_monitor.start(
        timeout=global_parameters.default_timeout,
        callback=log_callback,
        error_message=logcollector.GENERIC_CALLBACK_ERROR_COMMAND_MONITORING)

    before = str(datetime.now())
    TimeMachine.travel_to_future(timedelta(seconds=seconds_to_travel))
    logger.debug(f"Changing the system clock from {before} to {datetime.now()}")

    # The command should not be executed in the middle of the command execution cycle.
    with pytest.raises(TimeoutError):
        wazuh_log_monitor.start(timeout=global_parameters.default_timeout,
                                callback=log_callback,
                                error_message=logcollector.GENERIC_CALLBACK_ERROR_COMMAND_MONITORING)

    before = str(datetime.now())
    TimeMachine.travel_to_future(timedelta(seconds=seconds_to_travel))
    logger.debug(f"Changing the system clock from {before} to {datetime.now()}")

    wazuh_log_monitor.start(
        timeout=global_parameters.default_timeout,
        callback=log_callback,
        error_message=logcollector.GENERIC_CALLBACK_ERROR_COMMAND_MONITORING)

    # Restore the system clock.
    TimeMachine.time_rollback()
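
The test's timeline arithmetic can be captured in a tiny hypothetical helper (not part of wazuh_testing): one execution is logged at startup, and one more per full frequency cycle, which is why the half-cycle check must time out and the full-cycle check must not:

def expected_executions(elapsed_seconds, frequency):
    """Hypothetical helper: executions logcollector should have logged after elapsed_seconds."""
    return 1 + int(elapsed_seconds // frequency)  # One at startup, then one per full cycle.

assert expected_executions(0, 60) == 1   # The initial run is always logged.
assert expected_executions(30, 60) == 1  # Half a cycle: no new execution, hence the TimeoutError.
assert expected_executions(60, 60) == 2  # Full cycle completed: the callback must match again.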
Example #4
0
    def check_result(self):
        """Check if a TimeoutError occurred."""
        logger.debug('Checking results...')
        while not self._queue.empty():
            result = self._queue.get(block=True)
            for host, msg in result.items():
                if isinstance(msg, TimeoutError):
                    raise msg
                logger.debug(f'Received from {host} the expected message: {msg}')
                self._result[host].append(msg)
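
The same drain-and-reraise pattern, reduced to a standalone sketch; queue.Queue stands in for the shared multiprocessing queue, and the names are illustrative:

import queue

def check_result(results):
    """Drain the queue, re-raising any TimeoutError a worker stored in it."""
    collected = {}
    while not results.empty():
        for host, msg in results.get(block=True).items():
            if isinstance(msg, TimeoutError):
                raise msg  # Propagate the worker's failure in the parent.
            collected.setdefault(host, []).append(msg)
    return collected

results = queue.Queue()
results.put({'agent1': 'expected message'})
results.put({'agent2': TimeoutError('callback not found')})
try:
    check_result(results)
except TimeoutError as exc:
    print(f'Monitoring failed: {exc}')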
Example #5
0
    def _start(self,
               host,
               payload,
               path,
               encoding=None,
               error_messages_per_host=None):
        """Start the file monitoring until the QueueMonitor returns an string or TimeoutError.

        Args:
            host (str): Hostname
            payload (list,dict): Contains the message to be found and the timeout for it.
            path (str): Path where it must search for the message.
            encoding (str): Encoding of the file.
            error_messages_per_host (dict): Dictionary with hostnames as keys and desired error messages as values
        Returns:
            Instance of HostMonitor
        """
        tailer = FileTailer(os.path.join(self._tmp_path, path),
                            time_step=self._time_step)
        try:
            if encoding is not None:
                tailer.encoding = encoding
            tailer.start()
            for case in payload:
                logger.debug(f'Starting QueueMonitor for {host} and message: {case["regex"]}')
                monitor = QueueMonitor(tailer.queue, time_step=self._time_step)
                try:
                    self._queue.put({host: monitor.start(timeout=case['timeout'],
                                                         callback=callback_generator(case['regex']),
                                                         update_position=False).result().strip('\n')})
                except TimeoutError:
                    try:
                        self._queue.put({host: error_messages_per_host[host]})
                    except (KeyError, TypeError):
                        self._queue.put({host: TimeoutError(f'Did not find the expected callback in {host}: '
                                                            f'{case["regex"]}')})
                logger.debug(f'Finishing QueueMonitor for {host} and message: {case["regex"]}')
        finally:
            tailer.shutdown()

        return self
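
QueueMonitor and callback_generator come from wazuh_testing; a minimal stand-in showing the underlying poll-queue-until-regex-or-timeout idea (illustrative names, standard library only):

import queue
import re
import time

def monitor_until(q, regex, timeout, time_step=0.5):
    """Poll q until an item matches regex; raise TimeoutError when timeout elapses."""
    pattern = re.compile(regex)
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            item = q.get(timeout=time_step)
        except queue.Empty:
            continue
        if pattern.search(item):
            return item
    raise TimeoutError(f'Did not find the expected callback: {regex}')

q = queue.Queue()
q.put('ossec-logcollector: INFO: Analyzing file.')
print(monitor_until(q, r'Analyzing file', timeout=2))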
Example #6
0
def configure_syscheck_environment(time_sleep):
    """Create, modify and delete the monitored files, draining the syscheck events generated by each phase."""
    # Create every needed directory
    for n in range(n_directories):
        t_dir = os.path.join(PREFIX, f'{testdir}{n}')
        os.makedirs(t_dir, exist_ok=True, mode=0o777)
        directories_list.append(t_dir)

    wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('restart')
    logger.debug('Waiting 15 seconds for syscheckd to start.')
    time.sleep(15)

    file = 'regular'

    logger.debug(f'Waiting {time_sleep} seconds. Execute `generate_windows_yaml.py` now.')
    time.sleep(time_sleep)

    logger.debug('Creating files...')
    for directory in directories_list:
        create_file(REGULAR, directory, file, content='')
        time.sleep(0.01)
    try:
        while True:
            wazuh_log_monitor.start(timeout=5, callback=callback_detect_event)
    except TimeoutError:
        pass

    logger.debug('Modifying files...')
    for directory in directories_list:
        modify_file(directory, file, new_content='Modified')
        time.sleep(0.01)
    try:
        while True:
            wazuh_log_monitor.start(timeout=5, callback=callback_detect_event)
    except TimeoutError:
        pass

    logger.debug('Deleting files...')
    for directory in directories_list:
        delete_file(directory, file)
        time.sleep(0.01)
    try:
        while True:
            wazuh_log_monitor.start(timeout=5, callback=callback_detect_event)
    except TimeoutError:
        pass
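
Each phase above uses the same idiom: call the monitor with a short timeout in an endless loop until it raises, which means the event stream has gone quiet. A generic stand-in using queue.Queue, where queue.Empty plays the role of the TimeoutError:

import queue

def drain(events, quiet_timeout=5):
    """Consume events until the source stays quiet for quiet_timeout seconds."""
    collected = []
    try:
        while True:
            collected.append(events.get(timeout=quiet_timeout))  # Mirrors monitor.start(timeout=5, ...)
    except queue.Empty:  # Plays the role of the TimeoutError above.
        return collected

events = queue.Queue()
for i in range(3):
    events.put(f'syscheck event {i}')
print(drain(events, quiet_timeout=0.1))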
Example #7
0
def generate_analysisd_yaml(n_events, modify_events):
    def parse_events_into_yaml(requests, yaml_file):
        yaml_result = []
        with open(yaml_file, 'a') as y_f:
            id_ev = 0
            for req, event in requests:
                type_ev = event['data']['type']
                stage_ev = type_ev.title()
                mode = None
                agent_id = callback_analysisd_agent_id(req) or '000'

                del event['data']['mode']
                del event['data']['type']
                if 'tags' in event['data']:
                    del event['data']['tags']
                if type_ev == 'added':
                    mode = 'save2'
                    output_ev = json.dumps(event['data'])

                elif type_ev == 'deleted':
                    mode = 'delete'
                    output_ev = json.dumps(event['data']['path']).replace('"', '')

                elif type_ev == 'modified':
                    mode = 'save2'
                    for field in ['old_attributes', 'changed_attributes', 'content_changes']:
                        if field in event['data']:
                            del event['data'][field]
                    output_ev = json.dumps(event['data'])

                yaml_result.append({
                    'name': f"{stage_ev}{id_ev}",
                    'test_case': [{
                        'input': f"{req}",
                        'output': f"agent {agent_id} syscheck {mode} {output_ev}",
                        'stage': f"{stage_ev}"
                    }]
                })
                id_ev += 1
            y_f.write(yaml.safe_dump(yaml_result))

    def remove_logs():
        for root, dirs, files in os.walk(WAZUH_LOGS_PATH):
            for file in files:
                os.remove(os.path.join(root, file))

    # Restart syscheckd with the new configuration
    truncate_file(LOG_FILE_PATH)
    control_service('stop')
    check_daemon_status(running=False)

    remove_logs()

    control_service('start', daemon='ossec-analysisd', debug_mode=True)
    check_daemon_status(running=True, daemon='ossec-analysisd')

    mitm_analysisd = ManInTheMiddle(address=analysis_path,
                                    family='AF_UNIX',
                                    connection_protocol='UDP')
    analysis_queue = mitm_analysisd.queue
    mitm_analysisd.start()

    control_service('start', daemon='ossec-remoted', debug_mode=True)
    check_daemon_status(running=True, daemon='ossec-remoted')

    analysis_monitor = QueueMonitor(analysis_queue)

    while True:
        try:
            grep = subprocess.Popen(['grep', 'deleted', alerts_json], stdout=subprocess.PIPE)
            wc = int(subprocess.check_output(['wc', '-l'], stdin=grep.stdout).decode())
        except subprocess.CalledProcessError:
            wc = 0
        if wc >= n_events:
            logger.debug('All alerts received. Collecting by alert type...')
            break
        logger.debug(f'{wc} deleted events so far.')
        logger.debug('Waiting for alerts. Sleeping 5 seconds.')
        time.sleep(5)

    added = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                   callback=callback_analysisd_event,
                                   accum_results=n_events).result()
    logger.debug('"added" alerts collected.')

    modified = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                      callback=callback_analysisd_event,
                                      accum_results=modify_events).result()
    logger.debug('"modified" alerts collected.')

    deleted = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                     callback=callback_analysisd_event,
                                     accum_results=n_events).result()
    logger.debug('"deleted" alerts collected.')

    # Truncate file
    with open(yaml_file, 'w') as y_f:
        y_f.write('---\n')

    for ev_list in [added, modified, deleted]:
        parse_events_into_yaml(ev_list, yaml_file)
    logger.debug(f'YAML done: "{yaml_file}"')

    return mitm_analysisd
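
The grep | wc pipeline above can be replaced by a short pure-Python count, which also avoids leaving grep's stdout pipe open between retries; a sketch (helper name is illustrative):

def count_matching_lines(path, needle='deleted'):
    """Pure-Python equivalent of: grep deleted <path> | wc -l"""
    try:
        with open(path) as alerts:
            return sum(1 for line in alerts if needle in line)
    except FileNotFoundError:
        return 0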
Example #8
0
def configure_syscheck_environment(time_sleep):
    """Create, modify and delete the test registry keys, letting each scheduled scan report the changes."""
    # Create every needed registry key
    for n in range(n_windows_registry):
        t_dir = f'{testreg}{n}'
        create_registry(registry_parser[KEY], t_dir, KEY_WOW64_64KEY)
        reg_list.append(t_dir)

    control_service('restart')
    logger.debug('Waiting 15 seconds for syscheckd to start.')
    time.sleep(15)

    reg_key = 'reg_key'
    reg_value = 'value_name'

    logger.debug(f'Waiting {time_sleep} seconds. Execute `generate_windows_yaml.py` now.')
    time.sleep(time_sleep)

    logger.debug('Waiting 120 seconds for baseline scan to finish.')
    time.sleep(120)

    logger.debug('Creating registries...')
    for registry in reg_list:
        key_h = create_registry(registry_parser[KEY],
                                os.path.join(registry, reg_key),
                                KEY_WOW64_64KEY)
        modify_registry_value(key_h, reg_value, REG_SZ, 'added')

    TimeMachine.travel_to_future(timedelta(hours=13))

    logger.debug(f'Waiting {SCAN_WAIT} seconds for scan to finish.')
    time.sleep(SCAN_WAIT)

    logger.debug('Modifying registries...')
    for registry in reg_list:
        modify_key_perms(
            registry_parser[KEY], os.path.join(registry, reg_key),
            KEY_WOW64_64KEY,
            LookupAccountName(None, f"{platform.node()}\\{os.getlogin()}")[0])
        modify_registry_owner(
            registry_parser[KEY], os.path.join(registry, reg_key),
            KEY_WOW64_64KEY,
            LookupAccountName(None, f"{platform.node()}\\{os.getlogin()}")[0])
        key_h = RegOpenKeyEx(registry_parser[KEY],
                             os.path.join(registry, reg_key), 0,
                             KEY_ALL_ACCESS | KEY_WOW64_64KEY)
        modify_registry_value(key_h, reg_value, REG_SZ, 'modified')

    TimeMachine.travel_to_future(timedelta(hours=13))

    logger.debug(f'Waiting {SCAN_WAIT} seconds for scan to finish.')
    time.sleep(SCAN_WAIT)

    logger.debug('Deleting registries...')
    for registry in reg_list:
        delete_registry(registry_parser[KEY], os.path.join(registry, reg_key),
                        KEY_WOW64_64KEY)

    TimeMachine.travel_to_future(timedelta(hours=13))

    logger.debug(f'Waiting {SCAN_WAIT} seconds for scan to finish.')
    time.sleep(SCAN_WAIT)
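
create_registry, modify_registry_value and delete_registry wrap the Win32 registry API; a bare winreg sketch of the same three phases (Windows only, illustrative key path, requires administrator privileges for HKLM):

import winreg

key = winreg.CreateKeyEx(winreg.HKEY_LOCAL_MACHINE,
                         r'SOFTWARE\test_reg0\reg_key', 0,
                         winreg.KEY_WRITE | winreg.KEY_WOW64_64KEY)
winreg.SetValueEx(key, 'value_name', 0, winreg.REG_SZ, 'added')  # Create or modify a value.
winreg.CloseKey(key)
winreg.DeleteKeyEx(winreg.HKEY_LOCAL_MACHINE,
                   r'SOFTWARE\test_reg0\reg_key',
                   winreg.KEY_WOW64_64KEY, 0)  # Delete the key once the scan reported it.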
Example #9
0
    def clean_tmp_files(self):
        """Remove tmp files."""
        logger.debug('Cleaning temporary files...')
        for file in os.listdir(self._tmp_path):
            os.remove(os.path.join(self._tmp_path, file))
Example #10
0
def generate_analysisd_yaml(n_events, modify_events):
    def parse_events_into_yaml(requests, yaml_file):
        yaml_result = []
        with open(yaml_file, 'a') as y_f:
            id_ev = 0
            for req, event in requests:
                type_ev = event['data']['type']
                stage_ev = type_ev.title()
                mode = None
                agent_id = callback_analysisd_agent_id(req) or '000'

                del event['data']['mode']
                del event['data']['type']
                if 'tags' in event['data']:
                    del event['data']['tags']
                if type_ev == 'added':
                    mode = 'save2'
                    output_ev = json.dumps(event['data'])

                elif type_ev == 'deleted':
                    mode = 'delete'
                    output_ev = json.dumps(event['data']['path']).replace('"', '')

                elif type_ev == 'modified':
                    mode = 'save2'
                    for field in ['old_attributes', 'changed_attributes', 'content_changes']:
                        if field in event['data']:
                            del event['data'][field]
                    output_ev = json.dumps(event['data'])

                yaml_result.append({
                    'name': f"{stage_ev}{id_ev}",
                    'test_case': [{
                        'input': f"{req}",
                        'output': f"agent {agent_id} syscheck {mode} {output_ev}",
                        'stage': f"{stage_ev}"
                    }]
                })
                id_ev += 1
            y_f.write(yaml.safe_dump(yaml_result))

    def remove_logs():
        for root, dirs, files in os.walk(WAZUH_LOGS_PATH):
            for file in files:
                os.remove(os.path.join(root, file))

    file = 'regular'

    # Restart syscheckd with the new configuration
    truncate_file(LOG_FILE_PATH)
    file_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('stop')
    check_daemon_status(running=False)
    remove_logs()

    control_service('start', daemon='wazuh-db', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-db')

    control_service('start', daemon='wazuh-analysisd', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-analysisd')

    mitm_analysisd = ManInTheMiddle(address=analysis_path,
                                    family='AF_UNIX',
                                    connection_protocol='UDP')
    analysis_queue = mitm_analysisd.queue
    mitm_analysisd.start()

    control_service('start', daemon='wazuh-syscheckd', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-syscheckd')

    # Wait for initial scan
    detect_initial_scan(file_monitor)

    analysis_monitor = QueueMonitor(analysis_queue)

    for directory in directories_list:
        create_file(REGULAR, directory, file, content='')
        time.sleep(0.01)
    added = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                   callback=callback_analysisd_event,
                                   accum_results=len(directories_list)).result()
    logger.debug('"added" alerts collected.')

    for directory in directories_list:
        modify_file(directory, file, new_content='Modified')
        time.sleep(0.01)
    modified = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                      callback=callback_analysisd_event,
                                      accum_results=modify_events).result()
    logger.debug('"modified" alerts collected.')

    for directory in directories_list:
        delete_file(directory, file)
        time.sleep(0.01)
    deleted = analysis_monitor.start(timeout=max(0.01 * len(directories_list), 10),
                                     callback=callback_analysisd_event,
                                     accum_results=len(directories_list)).result()
    logger.debug('"deleted" alerts collected.')

    # Truncate file
    with open(yaml_file, 'w') as y_f:
        y_f.write('---\n')

    for ev_list in [added, modified, deleted]:
        parse_events_into_yaml(ev_list, yaml_file)
    logger.debug(f'YAML done: "{yaml_file}"')

    return mitm_analysisd
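
ManInTheMiddle here sits on analysisd's AF_UNIX datagram socket (the 'UDP' connection_protocol over a Unix socket). A self-contained sketch of that transport, POSIX only, with an illustrative socket path and message:

import os
import socket

sock_path = '/tmp/queue_demo'  # Illustrative; analysisd uses its own queue socket.
if os.path.exists(sock_path):
    os.remove(sock_path)
server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
server.bind(sock_path)

client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
client.sendto(b'agent 001 syscheck save2 {"path": "/tmp/regular"}', sock_path)
print(server.recv(4096))

server.close()
client.close()
os.remove(sock_path)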
Example #11
0
def stats_collector(filename, daemon, agents_dict, attempts_info,
                    configuration):
    """Collect the stats of the current daemon until all agents have finished the integrity process.

    Parameters
    ----------
    filename : str
        Path of the stats file for the current daemon.
    daemon : str
        Daemon tested.
    agents_dict : dict of shared dict
        Dictionary with the start time of every agent.
    attempts_info : shared dict
        Dictionary that tracks, among other flags, the number of agents that exceeded the limit of n_attempts.
    configuration : str
        Test configuration.
    """
    stats = get_stats(daemon)
    old_stats = deepcopy(stats)
    diff = None
    stats_df = pd.DataFrame(columns=['configuration', 'seconds', *stats.keys()])
    stats_df = stats_df.astype(
        dtype={
            'configuration': 'object',
            'seconds': 'float32',
            'cpu': 'int8',
            'mem': 'int32',
            'rchar': 'float32',
            'wchar': 'float32',
            'syscr': 'float32',
            'syscw': 'float32',
            'read_bytes': 'float32',
            'write_bytes': 'float32',
            'cancelled_write_bytes': 'float32'
        })

    counter = 0
    while not attempts_info['finish'] and attempts_info['agents_failed'] < len(agents_dict):
        diff = calculate_stats(old_stats, stats)
        old_stats = deepcopy(stats)
        logger.debug(f'Stats {daemon} writing: {time.time()},{",".join(map(str, diff.values()))}')
        stats_df = stats_df.append([{'configuration': configuration, 'seconds': time.time(), **diff}],
                                   ignore_index=True)
        time.sleep(setup_environment_time)
        stats = get_stats(daemon)
        counter += 1
        if counter % dataframe_write_every_rows == 0:
            logger.debug(f'Writing {daemon} stats chunk')
            stats_df = append_to_dataframe(filename, stats_df)
            counter = 0

    if attempts_info['agents_failed'] >= len(agents_dict):
        logger.info(f'Configuration finished. All agents reached the max_n_attempts, '
                    f'currently set to {max_n_attempts}')

    # Avoid empty stats file in 0 files case
    if not diff:
        diff = calculate_stats(old_stats, stats)
        logger.info(f'Finishing stats {daemon} writing: {time.time()},{",".join(map(str, diff.values()))}')
        stats_df = stats_df.append([{'configuration': configuration, 'seconds': time.time(), **diff}],
                                   ignore_index=True)
    stats_df.to_csv(filename, index=False)
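
Note that DataFrame.append, used above, was deprecated in pandas 1.4 and removed in 2.0. On current pandas the idiomatic equivalent is to accumulate plain dicts and build the frame once (or pd.concat per chunk); a sketch with illustrative columns and values:

import time
import pandas as pd

rows = []
for cpu_sample in range(3):
    rows.append({'configuration': 'test', 'seconds': time.time(), 'cpu': cpu_sample, 'mem': 2048})
stats_df = pd.DataFrame(rows, columns=['configuration', 'seconds', 'cpu', 'mem'])
stats_df.to_csv('stats.csv', index=False)  # Illustrative output path.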
Example #12
0
def state_collector(agents_dict, configuration, stats_path, attempts_info):
    """Get the stats of the .state files in the WAZUH_PATH/var/run folder.
    We can define the stats to get from each daemon in the daemons_dict.

    Parameters
    ----------
    agents_dict : dict of shared dict
        Dictionary with the start time of every agent.
    configuration : str
        Test configuration
    stats_path : str
        Stats folder.
    attempts_info : shared dict
        Dictionary with flags that indicate when the collector must stop.
    """
    def get_csv(daemon_df='wazuh-analysisd'):
        filename = os.path.join(stats_path, f"state-{daemon_df}.csv")
        if daemon_df == 'wazuh-analysisd':
            state_df = pd.DataFrame(columns=[
                'configuration', 'seconds', 'syscheck_events_decoded',
                'syscheck_edps', 'dbsync_queue_usage',
                'dbsync_messages_dispatched', 'dbsync_mdps', 'events_received',
                'events_dropped', 'syscheck_queue_usage', 'event_queue_usage'
            ])
            state_df = state_df.astype(
                dtype={
                    'configuration': 'object',
                    'seconds': 'float32',
                    'syscheck_events_decoded': 'float32',
                    'syscheck_edps': 'float32',
                    'dbsync_queue_usage': 'float32',
                    'dbsync_messages_dispatched': 'float32',
                    'dbsync_mdps': 'float32',
                    'events_received': 'float32',
                    'events_dropped': 'float32',
                    'syscheck_queue_usage': 'float32',
                    'event_queue_usage': 'float32'
                })
        elif daemon_df == 'wazuh-remoted':
            state_df = pd.DataFrame(columns=[
                'configuration', 'seconds', 'queue_size', 'tcp_sessions',
                'evt_count', 'discarded_count', 'recv_bytes'
            ])
            state_df = state_df.astype(
                dtype={
                    'configuration': 'object',
                    'seconds': 'float32',
                    'queue_size': 'float32',
                    'tcp_sessions': 'float32',
                    'evt_count': 'float32',
                    'discarded_count': 'float32',
                    'recv_bytes': 'float32'
                })
        else:
            raise NameError(f'Invalid daemon detected: {daemon_df}')

        return state_df

    daemons_dict = {
        'wazuh-analysisd': get_csv('wazuh-analysisd'),
        'wazuh-remoted': get_csv('wazuh-remoted')
    }

    states_exists = False
    counter = 0
    filename_analysisd = os.path.join(stats_path, "state-wazuh-analysisd.csv")
    filename_remoted = os.path.join(stats_path, "state-wazuh-remoted.csv")
    while not attempts_info['finish'] and attempts_info['agents_failed'] < len(agents_dict):
        for file in os.listdir(state_path):
            if file.endswith('.state'):
                states_exists = True
                daemon = str(file.split(".")[0])
                with open(os.path.join(state_path, file), 'r') as state_file:
                    file_content = state_file.read()
                values = {
                    'configuration': str(configuration),
                    'seconds': time.time()
                }
                # Skip configuration and seconds columns
                for field in list(daemons_dict[daemon])[2:]:
                    values[field] = np.float32(
                        re.search(rf"{field}='([0-9.]+)'", file_content, re.MULTILINE).group(1))

                logger.debug(f'State {daemon} writing: {",".join(map(str, values.values()))}')
                daemons_dict[daemon] = daemons_dict[daemon].append([values], ignore_index=True)
        counter += 1
        if counter % dataframe_write_every_rows == 0:
            logger.debug('Writing wazuh-analysisd state chunk')
            logger.debug('Writing wazuh-remoted state chunk')
            daemons_dict['wazuh-analysisd'] = append_to_dataframe(
                filename_analysisd, daemons_dict['wazuh-analysisd'])
            daemons_dict['wazuh-remoted'] = append_to_dataframe(
                filename_remoted, daemons_dict['wazuh-remoted'])
            counter = 0
        time.sleep(state_collector_time)

    if states_exists:
        for daemon, df in daemons_dict.items():
            filename = os.path.join(stats_path, f"state-{daemon}.csv")
            df.to_csv(filename, index=False)
        logger.info('Finished state collector')
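
The .state files are plain field='value' text, so each metric is extracted with the same regex as above; a standalone sketch over an illustrative excerpt:

import re
import numpy as np

state_text = """
syscheck_events_decoded='100.40'
events_received='123.00'
"""  # Illustrative excerpt of a .state file.

fields = ['syscheck_events_decoded', 'events_received']
values = {field: np.float32(re.search(rf"{field}='([0-9.]+)'", state_text, re.MULTILINE).group(1))
          for field in fields}
print(values)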
Example #13
0
def test_reconnect_time(get_local_internal_options,
                        configure_local_internal_options, get_configuration,
                        configure_environment, restart_logcollector):
    """Check if reconnect_time value works properly

    Ensure correspond debug logs are generated when Windows event log service stop. Also, when event log service is
    restarted, `wazuh-agent` should reconnect to it using reconnect_time value.
    """

    config = get_configuration['metadata']

    if time_to_seconds(
            config['reconnect_time']) >= timeout_callback_reconnect_time:
        pytest.xfail(
            "Expected fail: https://github.com/wazuh/wazuh/issues/8580")

    log_callback = logcollector.callback_eventchannel_analyzing(
        config['location'])
    wazuh_log_monitor.start(timeout=global_parameters.default_timeout,
                            callback=log_callback,
                            error_message=logcollector.GENERIC_CALLBACK_ERROR_ANALYZING_EVENTCHANNEL)

    services.control_event_log_service('stop')

    log_callback = logcollector.callback_event_log_service_down(
        config['location'])
    wazuh_log_monitor.start(timeout=30,
                            callback=log_callback,
                            error_message=logcollector.GENERIC_CALLBACK_ERROR_ANALYZING_EVENTCHANNEL)

    log_callback = logcollector.callback_trying_to_reconnect(
        config['location'], time_to_seconds(config['reconnect_time']))
    wazuh_log_monitor.start(timeout=30,
                            callback=log_callback,
                            error_message=logcollector.GENERIC_CALLBACK_ERROR_ANALYZING_EVENTCHANNEL)

    services.control_event_log_service('start')

    time.sleep(1)

    if time_to_seconds(
            config['reconnect_time']) >= timeout_callback_reconnect_time:
        before = str(datetime.now())
        seconds_to_travel = time_to_seconds(config['reconnect_time']) / 2
        TimeMachine.travel_to_future(timedelta(seconds=seconds_to_travel))
        logger.debug(f"Changing the system clock from {before} to {datetime.now()}")

    log_callback = logcollector.callback_reconnect_eventchannel(
        config['location'])

    before = str(datetime.now())

    if time_to_seconds(
            config['reconnect_time']) >= timeout_callback_reconnect_time:
        TimeMachine.travel_to_future(timedelta(seconds=seconds_to_travel))
        logger.debug(f"Changing the system clock from {before} to {datetime.now()}")

    wazuh_log_monitor.start(
        timeout=30,
        callback=log_callback,
        error_message=logcollector.GENERIC_CALLBACK_ERROR_COMMAND_MONITORING)

    TimeMachine.time_rollback()
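
time_to_seconds comes from wazuh_testing; a plausible minimal implementation of such a parser, shown here only as an assumption about its behavior, not the library's actual code:

def time_to_seconds(value):
    """Convert '30s', '2m', '1h' or '1d' to seconds (hypothetical stand-in)."""
    units = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    return int(value[:-1]) * units[value[-1]]

assert time_to_seconds('30s') == 30
assert time_to_seconds('2m') == 120
assert time_to_seconds('1h') == 3600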