コード例 #1
0
def override_wazuh_conf(configuration, set_password):
    """Apply a test configuration to wazuh-authd and wait until it is ready.

    Stops wazuh-authd, truncates the log, writes the rendered test
    configuration, resets the client keys and the authd password, then
    restarts the daemon and blocks until it logs that it is accepting
    connections on port 1515.

    Args:
        configuration (dict): Test configuration; its 'sections' entry is
            rendered into the Wazuh configuration file.
        set_password: Password value forwarded to reset_password().

    Raises:
        TimeoutError: If authd does not start accepting connections within
            30 seconds (raised by the FileMonitor).
    """
    # Stop Wazuh
    control_service('stop', daemon='wazuh-authd')
    time.sleep(1)
    check_daemon_status(running=False, daemon='wazuh-authd')
    truncate_file(LOG_FILE_PATH)

    # Configuration for testing
    test_config = set_section_wazuh_conf(configuration.get('sections'))
    # Set new configuration
    write_wazuh_conf(test_config)

    # reset_client_keys
    clean_client_keys_file()
    # reset password
    reset_password(set_password)

    time.sleep(1)
    # Start Wazuh
    control_service('start', daemon='wazuh-authd')

    # Wait until authd has begun accepting connections.
    # (Fixed: this note was previously a bare string literal, which is a
    # no-op expression statement, not a comment.)
    def callback_agentd_startup(line):
        """Return the line once authd reports it is listening on port 1515."""
        if 'Accepting connections on port 1515' in line:
            return line
        return None

    log_monitor = FileMonitor(LOG_FILE_PATH)
    log_monitor.start(timeout=30, callback=callback_agentd_startup)
    time.sleep(1)
コード例 #2
0
ファイル: test_audit.py プロジェクト: spothound/wazuh-qa
def test_restart_audit(tags_to_apply, should_restart, get_configuration, configure_environment, restart_syscheckd):
    """Check `<restart_audit>` functionality by removing the audisp plugin file and monitoring auditd to see if it
    restarts and creates the file again.

    Args:
        tags_to_apply (set): Run test if matches with a configuration identifier, skip otherwise.
        should_restart (boolean): True if Auditd should restart, False otherwise.
        get_configuration (fixture): Gets the current configuration of the test.
        configure_environment (fixture): Configure the environment for the execution of the test.
        restart_syscheckd (fixture): Restarts syscheck.

    Raises:
        TimeoutError: If an expected event couldn't be captured.
        ValueError: If the time before and after the restart are equal when auditd has been restarted or if the time
                    before and after the restart are different when auditd hasn't been restarted.
    """

    def get_audit_creation_time():
        # The process creation time changes iff auditd was restarted, so it
        # serves as the restart detector for this test.
        for proc in psutil.process_iter(attrs=['name']):
            if proc.name() == "auditd":
                logger.info(f"auditd detected. PID: {proc.pid}")
                return proc.create_time()
        pytest.fail("Auditd is not running")

    plugin_path = "/etc/audisp/plugins.d/af_wazuh.conf"

    logger.info('Applying the test configuration')
    check_apply_test(tags_to_apply, get_configuration['tags'])

    # Remove the plugin file; a restarted auditd is expected to recreate it.
    os.remove(plugin_path)

    time_before_restart = get_audit_creation_time()
    control_service('restart')
    try:
        check_daemon_status(timeout=30)
    except TimeoutError:
        # Best effort: proceed even if the daemon status check times out.
        pass
    time_after_restart = get_audit_creation_time()

    if should_restart:
        assert time_before_restart != time_after_restart, 'The time before restart audit is equal to ' \
                                                          'the time after restart'
    else:
        assert time_before_restart == time_after_restart, 'The time before restart audit is not equal to ' \
                                                          'the time after restart'

    assert os.path.isfile(plugin_path)
コード例 #3
0
ファイル: conftest.py プロジェクト: muharihar/wazuh-qa
def configure_environment_standalone_daemons(request):
    """Set up an environment running only the Wazuh daemons listed in the
    test module's ``used_daemons`` attribute.

    The wazuh service is fully stopped first, leftover sockets are removed,
    and each selected daemon is started in debug mode. Optional module-level
    hooks ``extra_configuration_before_yield`` /
    ``extra_configuration_after_yield`` are invoked around the test. The
    daemons, sockets and logs are all cleaned up afterwards.
    """

    def wipe_logs():
        """Delete every file under the Wazuh logs directory."""
        for dirpath, _, filenames in os.walk(WAZUH_LOGS_PATH):
            for name in filenames:
                os.remove(os.path.join(dirpath, name))

    # Make sure no Wazuh daemon is left running, then clear stale sockets.
    control_service('stop')
    check_daemon_status(running=False)
    delete_sockets()

    # Bring up only the daemons the test module asked for, in debug mode.
    for daemon in request.module.used_daemons:
        control_service('start', daemon=daemon, debug_mode=True)
        check_daemon_status(running=True, daemon=daemon)

    # Start from a clean ossec.log.
    truncate_file(LOG_FILE_PATH)

    # Optional per-module setup hook.
    if hasattr(request.module, 'extra_configuration_before_yield'):
        request.module.extra_configuration_before_yield()

    yield

    # Optional per-module teardown hook.
    if hasattr(request.module, 'extra_configuration_after_yield'):
        request.module.extra_configuration_after_yield()

    # Stop the daemons we started, then remove sockets and logs.
    for daemon in request.module.used_daemons:
        control_service('stop', daemon=daemon)
    delete_sockets()
    wipe_logs()
コード例 #4
0
def test_restart_audit(tags_to_apply, should_restart, get_configuration,
                       configure_environment, restart_syscheckd):
    """Verify <restart_audit> by deleting the audisp plugin file and checking
    whether auditd was restarted (recreating the file) or left untouched.

    Parameters
    ----------
    tags_to_apply : set
        Run the test only when it matches a configuration identifier.
    should_restart : boolean
        Expected outcome: True if auditd should have been restarted.
    """
    plugin_path = "/etc/audisp/plugins.d/af_wazuh.conf"

    def get_audit_creation_time():
        """Return the creation time of the running auditd process."""
        for proc in psutil.process_iter(attrs=['name']):
            if proc.name() == "auditd":
                logger.info(f"auditd detected. PID: {proc.pid}")
                return proc.create_time()
        pytest.fail("Auditd is not running")

    logger.info('Applying the test configuration')
    check_apply_test(tags_to_apply, get_configuration['tags'])

    # Remove the plugin file; a restarted auditd is expected to recreate it.
    os.remove(plugin_path)
    time_before_restart = get_audit_creation_time()

    control_service('restart')
    try:
        check_daemon_status(timeout=30)
    except TimeoutError:
        # Best effort: proceed even if the daemon status check times out.
        pass

    time_after_restart = get_audit_creation_time()
    restarted = time_before_restart != time_after_restart

    if should_restart:
        assert restarted, ('The time before restart audit is equal to '
                           'the time after restart')
    else:
        assert not restarted, ('The time before restart audit is not equal to '
                               'the time after restart')

    assert os.path.isfile(plugin_path)
コード例 #5
0
ファイル: conftest.py プロジェクト: muharihar/wazuh-qa
def configure_mitm_environment_wazuhdb(request):
    """Replace the wazuh-db socket with a man-in-the-middle proxy.

    (Fixed docstring: the previous one claimed analysisd was also
    intercepted, but this fixture only handles wazuh-db.)

    Expects the test module to define ``wdb_path`` (path of the wazuh-db
    socket). Publishes a ``wdb_monitor`` QueueMonitor attribute on the test
    module. After the test, the MITM proxy is shut down, wazuh-db is
    stopped, all databases are deleted and the full wazuh service is
    restarted.
    """
    wdb_path = getattr(request.module, 'wdb_path')

    # Stop wazuh-service and ensure all daemons are stopped
    control_service('stop')
    check_daemon_status(running=False)
    remove_logs()

    # Start only wazuh-db (debug mode) and intercept its socket.
    control_service('start', daemon='wazuh-db', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-db')

    mitm_wdb = ManInTheMiddle(socket_path=wdb_path)
    wdb_queue = mitm_wdb.queue
    mitm_wdb.start()

    wdb_monitor = QueueMonitor(queue_item=wdb_queue)

    setattr(request.module, 'wdb_monitor', wdb_monitor)

    yield

    mitm_wdb.shutdown()

    # Single-element loop unrolled: only wazuh-db is managed here.
    control_service('stop', daemon='wazuh-db')
    check_daemon_status(running=False, daemon='wazuh-db')

    # Delete all db
    delete_dbs()

    control_service('start')
コード例 #6
0
ファイル: conftest.py プロジェクト: muharihar/wazuh-qa
def configure_mitm_environment_analysisd(request):
    """Intercept the analysisd and wazuh-db sockets with MITM proxies.

    Reads ``analysis_path`` and ``wdb_path`` from the test module, starts
    wazuh-db and ossec-analysisd in debug mode behind ManInTheMiddle
    proxies, and publishes ``analysis_monitor`` / ``wdb_monitor``
    QueueMonitor attributes on the module. Tears everything down after the
    test and restarts the full wazuh service.
    """

    def remove_logs():
        """Delete every file under the Wazuh logs directory."""
        for dirpath, _, filenames in os.walk(WAZUH_LOGS_PATH):
            for name in filenames:
                os.remove(os.path.join(dirpath, name))

    analysis_path = request.module.analysis_path
    wdb_path = request.module.wdb_path

    # Fully stop the service and start from clean logs.
    control_service('stop')
    check_daemon_status(running=False)
    remove_logs()

    # wazuh-db behind a MITM proxy on its socket.
    control_service('start', daemon='wazuh-db', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-db')
    mitm_wdb = ManInTheMiddle(socket_path=wdb_path)
    wdb_queue = mitm_wdb.queue
    mitm_wdb.start()

    # analysisd behind a UDP MITM proxy on its socket.
    control_service('start', daemon='ossec-analysisd', debug_mode=True)
    check_daemon_status(running=True, daemon='ossec-analysisd')
    mitm_analysisd = ManInTheMiddle(socket_path=analysis_path, mode='UDP')
    analysisd_queue = mitm_analysisd.queue
    mitm_analysisd.start()

    # Expose queue monitors to the test module.
    setattr(request.module, 'analysis_monitor', QueueMonitor(queue_item=analysisd_queue))
    setattr(request.module, 'wdb_monitor', QueueMonitor(queue_item=wdb_queue))

    yield

    mitm_analysisd.shutdown()
    mitm_wdb.shutdown()

    for daemon in ('wazuh-db', 'ossec-analysisd'):
        control_service('stop', daemon=daemon)
        check_daemon_status(running=False, daemon=daemon)

    control_service('start')
コード例 #7
0
ファイル: conftest.py プロジェクト: adampielak/wazuh-qa
def configure_mitm_environment(request):
    """Configure environment for sockets and MITM.

    Reads ``monitored_sockets_params`` — a list of
    ``(daemon, mitm, daemon_first)`` tuples — and ``log_monitor_paths``
    from the test module. Each daemon is started in debug mode with an
    optional ManInTheMiddle proxy on its socket, started before or after
    the daemon according to ``daemon_first``. Publishes
    ``monitored_sockets`` (QueueMonitors) and ``log_monitors``
    (FileMonitors) on the module. After the test, everything is stopped,
    all databases are deleted and the full wazuh service is restarted.
    """
    monitored_sockets_params = getattr(request.module,
                                       'monitored_sockets_params')
    log_monitor_paths = getattr(request.module, 'log_monitor_paths')

    def extra_sockets_for(mitm):
        """Extra sockets to verify for a daemon whose MITM owns a UNIX socket."""
        if mitm is not None and mitm.family == 'AF_UNIX':
            return [mitm.listener_socket_address]
        return None

    # Stop wazuh-service and ensure all daemons are stopped
    control_service('stop')
    check_daemon_status(running=False)

    monitored_sockets = list()
    mitm_list = list()
    log_monitors = list()

    # Truncate logs and create FileMonitors
    for log in log_monitor_paths:
        truncate_file(log)
        log_monitors.append(FileMonitor(log))

    # Start selected daemons and monitored sockets MITM.
    # (Fixed idiom: the `x and y and f()` short-circuit chains used as
    # statements are rewritten as explicit `if` blocks.)
    for daemon, mitm, daemon_first in monitored_sockets_params:
        if mitm is not None and not daemon_first:
            mitm.start()
        control_service('start', daemon=daemon, debug_mode=True)
        check_daemon_status(running=True, daemon=daemon,
                            extra_sockets=extra_sockets_for(mitm))
        if mitm is not None and daemon_first:
            mitm.start()
        if mitm is not None:
            monitored_sockets.append(QueueMonitor(queue_item=mitm.queue))
            mitm_list.append(mitm)

    setattr(request.module, 'monitored_sockets', monitored_sockets)
    setattr(request.module, 'log_monitors', log_monitors)

    yield

    # Stop daemons and monitored sockets MITM
    for daemon, mitm, _ in monitored_sockets_params:
        if mitm is not None:
            mitm.shutdown()
        control_service('stop', daemon=daemon)
        check_daemon_status(running=False, daemon=daemon,
                            extra_sockets=extra_sockets_for(mitm))

    # Delete all db
    delete_dbs()

    control_service('start')
コード例 #8
0
def generate_analysisd_yaml(n_events, modify_events):
    """Collect FIM alerts from analysisd via a socket MITM and dump them as
    a YAML test-case file.

    Polls alerts.json until ``n_events`` 'deleted' alerts are present, then
    collects the 'added', 'modified' and 'deleted' event batches from the
    intercepted analysisd queue and writes one YAML test case per event
    into ``yaml_file`` (module-level path).

    Args:
        n_events (int): Number of 'added'/'deleted' events to collect; also
            scales collection timeouts.
        modify_events (int): Number of 'modified' events to collect.

    Returns:
        ManInTheMiddle: The running analysisd socket proxy; the caller is
        responsible for shutting it down.
    """
    def parse_events_into_yaml(requests, yaml_file):
        """Append one YAML test case per (request, event) pair to yaml_file."""
        yaml_result = []
        with open(yaml_file, 'a') as y_f:
            id_ev = 0
            for req, event in requests:
                type_ev = event['data']['type']
                stage_ev = type_ev.title()
                mode = None
                agent_id = callback_analysisd_agent_id(req) or '000'

                # Strip fields that are not part of the expected wazuh-db output.
                del event['data']['mode']
                del event['data']['type']
                if 'tags' in event['data']:
                    del event['data']['tags']

                if type_ev == 'added':
                    mode = 'save2'
                    output_ev = json.dumps(event['data'])
                elif type_ev == 'deleted':
                    # Deletions only send the path, without surrounding quotes.
                    mode = 'delete'
                    output_ev = json.dumps(event['data']['path']).replace('"', '')
                elif type_ev == 'modified':
                    mode = 'save2'
                    for field in ['old_attributes', 'changed_attributes', 'content_changes']:
                        if field in event['data']:
                            del event['data'][field]
                    output_ev = json.dumps(event['data'])
                else:
                    # Previously fell through to an UnboundLocalError on
                    # output_ev; fail explicitly instead.
                    raise ValueError(f'Unexpected event type: {type_ev}')

                yaml_result.append({
                    'name': f"{stage_ev}{id_ev}",
                    'test_case': [{
                        'input': f"{req}",
                        'output': f"agent {agent_id} syscheck {mode} {output_ev}",
                        'stage': f"{stage_ev}"
                    }]
                })
                id_ev += 1
            y_f.write(yaml.safe_dump(yaml_result))

    def remove_logs():
        """Delete every file under the Wazuh logs directory."""
        for root, dirs, files in os.walk(WAZUH_LOGS_PATH):
            for file in files:
                os.remove(os.path.join(root, file))

    # Restart with clean logs and only the daemons we need.
    truncate_file(LOG_FILE_PATH)
    control_service('stop')
    check_daemon_status(running=False)

    remove_logs()

    control_service('start', daemon='ossec-analysisd', debug_mode=True)
    check_daemon_status(running=True, daemon='ossec-analysisd')

    mitm_analysisd = ManInTheMiddle(address=analysis_path,
                                    family='AF_UNIX',
                                    connection_protocol='UDP')
    analysis_queue = mitm_analysisd.queue
    mitm_analysisd.start()

    control_service('start', daemon='ossec-remoted', debug_mode=True)
    check_daemon_status(running=True, daemon='ossec-remoted')

    analysis_monitor = QueueMonitor(analysis_queue)

    # Poll alerts.json until enough 'deleted' alerts have arrived.
    while True:
        grep = subprocess.Popen(['grep', 'deleted', alerts_json],
                                stdout=subprocess.PIPE)
        try:
            wc = int(subprocess.check_output(['wc', '-l'],
                                             stdin=grep.stdout).decode())
        except subprocess.CalledProcessError:
            wc = 0
        finally:
            grep.stdout.close()  # fixed: release the pipe so grep can exit
            grep.wait()          # fixed: reap grep to avoid zombie processes
        if wc >= n_events:
            # Fixed: was logging.debug (root logger); use the module logger
            # consistently with the rest of the function.
            logger.debug('All alerts received. Collecting by alert type...')
            break
        logger.debug(f'{wc} deleted events so far.')
        logger.debug('Waiting for alerts. Sleeping 5 seconds.')
        time.sleep(5)

    added = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                   callback=callback_analysisd_event,
                                   accum_results=n_events).result()
    logger.debug('"added" alerts collected.')

    modified = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                      callback=callback_analysisd_event,
                                      accum_results=modify_events).result()
    logger.debug('"modified" alerts collected.')

    deleted = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                     callback=callback_analysisd_event,
                                     accum_results=n_events).result()
    logger.debug('"deleted" alerts collected.')

    # Truncate the output file, then append each batch.
    with open(yaml_file, 'w') as y_f:
        y_f.write('---\n')  # fixed: was an f-string with no placeholders

    for ev_list in [added, modified, deleted]:
        parse_events_into_yaml(ev_list, yaml_file)
    logger.debug(f'YAML done: "{yaml_file}"')

    return mitm_analysisd
コード例 #9
0
def kill_daemons():
    """Stop ossec-remoted and ossec-analysisd and verify each is down."""
    for target in ('ossec-remoted', 'ossec-analysisd'):
        control_service('stop', daemon=target)
        check_daemon_status(daemon=target, running=False)
コード例 #10
0
def test_performance(mode, file_size, eps, path_length, number_files, initial_clean, modify_local_internal_options):
    """Run the full FIM performance scenario — scan, integrity and the three
    real-time stages (added/modified/deleted) — collecting agent statistics
    into per-branch CSV files under ``performance_dir``.
    """
    replace_conf(eps['sync_eps'], eps['fim_eps'])
    branch = detect_syscheck_version()
    os.makedirs(performance_dir, exist_ok=True)

    fconfiguration = f'{number_files}files_{path_length}length_{file_size}size'
    configuration = {
        'file_size': file_size,
        'path_length': path_length,
        'number_files': number_files,
        'real_number_files': 6000,
        'mode': mode
    }

    # State shared with the stats-collector side process.
    state_status = Manager().dict({'stop': False, 'finish': False, 'state': 'scan'})
    state_filename = os.path.join(performance_dir, f"{branch}_agentd_state.csv")
    integrity_filename = os.path.join(performance_dir, f"{branch}_time_checksum_integrity.csv")
    data_filename = os.path.join(performance_dir, f'{branch}_stats.csv')

    # Reuse a previous integrity CSV when present; otherwise start empty.
    try:
        integrity_df = pd.read_csv(integrity_filename)
    except FileNotFoundError:
        integrity_df = pd.DataFrame(columns=['configuration', 'stage', 'db_size', 'journal_db_size', 'duration(s)'])
    data_df = pd.DataFrame(columns=['configuration', 'seconds', 'cpu(%)', 'mem(KB)', 'rchar(KB/s)', 'wchar(KB/s)',
                                    'syscr(Input/s)', 'syscw(Output/s)', 'read_bytes(KB/s)', 'write_bytes(KB/s)',
                                    'cancelled_write_bytes(KB)', 'stage'])

    # Stop the daemon, create the monitored file set, then start it again.
    control_service(daemon=tested_daemon, action='stop')
    check_daemon_status(daemon=tested_daemon, running=False)

    path_name = create_long_path(path_length, "scan")
    create_n_files(path_name, number_files, file_size)

    truncate_file(LOG_FILE_PATH)
    control_service(daemon=tested_daemon, action='start')
    check_daemon_status(daemon=tested_daemon, running=True)

    # Collect agentd state in a side process while the stages run.
    state_process = Process(target=state_collector, args=(state_filename, fconfiguration, state_status,))
    state_process.start()

    # Scan stage.
    scan_integrity_test(fim_df=data_df, string_configuration=fconfiguration, configuration=configuration,
                        integrity_df=integrity_df, fim_type=state_status['state'])

    # Integrity stage.
    state_status['state'] = 'integrity'
    scan_integrity_test(fim_df=data_df, string_configuration=fconfiguration, integrity_df=integrity_df,
                        configuration=configuration, fim_type=state_status['state'])

    # Real-time stages: added, modified, deleted (same sequence for each).
    for event_type in (Types.added, Types.modified, Types.deleted):
        truncate_file(LOG_FILE_PATH)
        state_status['state'] = event_type.value
        real_test(state_status['state'], string_configuration=fconfiguration, real_df=data_df,
                  integrity_df=integrity_df, configuration=configuration)

    # Signal the collector to stop and wait for it to flush and exit.
    state_status['stop'] = True
    while not state_status['finish']:
        time.sleep(0.1)
    state_process.join()

    integrity_df.to_csv(integrity_filename, index=False)
    data_df.to_csv(data_filename, index=False)

    # Clean environment
    clean_environment()
コード例 #11
0
def generate_analysisd_yaml(n_events, modify_events):
    """Generate a YAML test-case file from live FIM events.

    Starts wazuh-db, wazuh-analysisd (behind a socket MITM) and
    wazuh-syscheckd, then creates, modifies and deletes one file per
    monitored directory, collects the resulting 'added'/'modified'/'deleted'
    events from the intercepted analysisd queue, and writes them as YAML
    test cases into ``yaml_file`` (module-level path).

    Args:
        n_events (int): Expected event volume; used to scale collection timeouts.
        modify_events (int): Number of 'modified' events to collect.

    Returns:
        ManInTheMiddle: The running analysisd socket proxy; the caller is
        responsible for shutting it down.
    """
    def parse_events_into_yaml(requests, yaml_file):
        """Append one YAML test case per (request, event) pair to yaml_file."""
        yaml_result = []
        with open(yaml_file, 'a') as y_f:
            id_ev = 0
            for req, event in requests:
                type_ev = event['data']['type']
                stage_ev = type_ev.title()
                mode = None
                agent_id = callback_analysisd_agent_id(req) or '000'

                # Strip fields that are not part of the expected wazuh-db output.
                del event['data']['mode']
                del event['data']['type']
                if 'tags' in event['data']:
                    del event['data']['tags']

                if type_ev == 'added':
                    mode = 'save2'
                    output_ev = json.dumps(event['data'])
                elif type_ev == 'deleted':
                    # Deletions only send the path, without surrounding quotes.
                    mode = 'delete'
                    output_ev = json.dumps(event['data']['path']).replace('"', '')
                elif type_ev == 'modified':
                    mode = 'save2'
                    for field in ['old_attributes', 'changed_attributes', 'content_changes']:
                        if field in event['data']:
                            del event['data'][field]
                    output_ev = json.dumps(event['data'])
                else:
                    # Previously fell through to an UnboundLocalError on
                    # output_ev; fail explicitly instead.
                    raise ValueError(f'Unexpected event type: {type_ev}')

                yaml_result.append({
                    'name': f"{stage_ev}{id_ev}",
                    'test_case': [{
                        'input': f"{req}",
                        'output': f"agent {agent_id} syscheck {mode} {output_ev}",
                        'stage': f"{stage_ev}"
                    }]
                })
                id_ev += 1
            y_f.write(yaml.safe_dump(yaml_result))

    def remove_logs():
        """Delete every file under the Wazuh logs directory."""
        for root, dirs, files in os.walk(WAZUH_LOGS_PATH):
            for file_name in files:
                os.remove(os.path.join(root, file_name))

    # Base name of the file created in every monitored directory.
    file = 'regular'

    # Restart with clean logs and only the daemons we need.
    truncate_file(LOG_FILE_PATH)
    file_monitor = FileMonitor(LOG_FILE_PATH)
    control_service('stop')
    check_daemon_status(running=False)
    remove_logs()

    control_service('start', daemon='wazuh-db', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-db')

    control_service('start', daemon='wazuh-analysisd', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-analysisd')

    mitm_analysisd = ManInTheMiddle(address=analysis_path,
                                    family='AF_UNIX',
                                    connection_protocol='UDP')
    analysis_queue = mitm_analysisd.queue
    mitm_analysisd.start()

    control_service('start', daemon='wazuh-syscheckd', debug_mode=True)
    check_daemon_status(running=True, daemon='wazuh-syscheckd')

    # Wait for syscheck's initial scan before generating events.
    detect_initial_scan(file_monitor)

    analysis_monitor = QueueMonitor(analysis_queue)

    # One 'added' event per monitored directory.
    for directory in directories_list:
        create_file(REGULAR, directory, file, content='')
        time.sleep(0.01)
    added = analysis_monitor.start(
        timeout=max(0.01 * n_events, 10),
        callback=callback_analysisd_event,
        accum_results=len(directories_list)).result()
    logger.debug('"added" alerts collected.')

    # One 'modified' event per monitored directory.
    for directory in directories_list:
        modify_file(directory, file, new_content='Modified')
        time.sleep(0.01)
    modified = analysis_monitor.start(timeout=max(0.01 * n_events, 10),
                                      callback=callback_analysisd_event,
                                      accum_results=modify_events).result()
    logger.debug('"modified" alerts collected.')

    # One 'deleted' event per monitored directory.
    for directory in directories_list:
        delete_file(directory, file)
        time.sleep(0.01)
    deleted = analysis_monitor.start(
        timeout=max(0.01 * len(directories_list), 10),
        callback=callback_analysisd_event,
        accum_results=len(directories_list)).result()
    logger.debug('"deleted" alerts collected.')

    # Truncate the output file, then append each batch.
    with open(yaml_file, 'w') as y_f:
        y_f.write('---\n')  # fixed: was an f-string with no placeholders

    for ev_list in [added, modified, deleted]:
        parse_events_into_yaml(ev_list, yaml_file)
    logger.debug(f'YAML done: "{yaml_file}"')

    return mitm_analysisd
コード例 #12
0
def kill_daemons():
    """Stop wazuh-analysisd, wazuh-db and wazuh-syscheckd, verifying each."""
    targets = ('wazuh-analysisd', 'wazuh-db', 'wazuh-syscheckd')
    for target in targets:
        control_service('stop', daemon=target)
        check_daemon_status(daemon=target, running=False)