Example #1
def get_files_status(node_type, get_md5=True):
    """Get all files and metadata inside the directories listed in cluster.json['files'].

    Parameters
    ----------
    node_type : str
        TODO - To be deprecated
    get_md5 : bool
        Whether to calculate and save the MD5 hash of the found file.

    Returns
    -------
    final_items : dict
        Paths (keys) and metadata (values) of all the files requested in cluster.json['files'].
    """
    cluster_items = get_cluster_items()

    final_items = {}
    for file_path, item in cluster_items['files'].items():
        if file_path == "excluded_files" or file_path == "excluded_extensions":
            continue

        if item['source'] == node_type or item['source'] == 'all':
            try:
                final_items.update(
                    walk_dir(file_path, item['recursive'], item['files'],
                             cluster_items['files']['excluded_files'],
                             cluster_items['files']['excluded_extensions'],
                             file_path, get_md5, node_type))
            except Exception as e:
                logger.warning("Error getting file status: {}.".format(e))

    return final_items
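A minimal usage sketch for the function above (the node type value, the call site, and the printed fields are illustrative assumptions, not taken from the original):

# Hypothetical call site: gather the status of every master-sourced file
# listed in cluster.json and print each path with its MD5 hash.
statuses = get_files_status(node_type='master', get_md5=True)
for file_path, metadata in statuses.items():
    print(file_path, metadata.get('md5'))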
Example #2
def get_files_status(node_type, node_name, get_md5=True):
    cluster_items = get_cluster_items()

    final_items = {}
    for file_path, item in cluster_items['files'].items():
        if file_path == "excluded_files" or file_path == "excluded_extensions":
            continue

        if item['source'] == node_type or item['source'] == 'all':
            if item.get("files") and "agent-info.merged" in item["files"]:
                agents_to_send, merged_path = \
                    merge_agent_info(merge_type="agent-info",
                                     node_name=node_name,
                                     time_limit_seconds=cluster_items['sync_options']['get_agentinfo_newer_than']
                                     )
                if agents_to_send == 0:
                    return {}

                fullpath = path.dirname(merged_path)
            else:
                fullpath = file_path
            try:
                final_items.update(
                    walk_dir(fullpath, item['recursive'], item['files'],
                             cluster_items['files']['excluded_files'],
                             cluster_items['files']['excluded_extensions'],
                             file_path, get_md5, node_type))
            except Exception as e:
                logger.warning("Error getting file status: {}.".format(e))

    return final_items
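A hedged sketch of calling this variant; compared with Example #1 it takes a node_name and can return an empty dict early (names and values below are illustrative):

# Hypothetical call site: an empty result means no agent-info file newer than
# cluster.json['sync_options']['get_agentinfo_newer_than'] was found to merge.
statuses = get_files_status(node_type='worker', node_name='worker-01', get_md5=True)
if not statuses:
    print('Nothing to synchronize yet.')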
Example #3
def get_files_status(get_md5=True):
    """Get all files and metadata inside the directories listed in cluster.json['files'].

    Parameters
    ----------
    get_md5 : bool
        Whether to calculate and save the MD5 hash of the found file.

    Returns
    -------
    final_items : dict
        Paths (keys) and metadata (values) of all the files requested in cluster.json['files'].
    """

    cluster_items = get_cluster_items()

    final_items = {}
    for file_path, item in cluster_items['files'].items():
        if file_path == "excluded_files" or file_path == "excluded_extensions":
            continue
        try:
            final_items.update(
                walk_dir(file_path, item['recursive'], item['files'], cluster_items['files']['excluded_files'],
                         cluster_items['files']['excluded_extensions'], file_path, get_md5))
        except Exception as e:
            logger.warning(f"Error getting file status: {e}.")
    # Save the information collected in the current integrity process.
    common.cluster_integrity_mtime.set(final_items)

    return final_items
Example #4
def get_cluster_items_worker_intervals():
    """Get worker's time intervals specified in cluster.json file.

    Returns
    -------
    dict
        Worker's time intervals specified in cluster.json file.
    """
    return get_cluster_items()['intervals']['worker']
Example #5
def get_cluster_items_communication_intervals():
    """Get communication's time intervals specified in cluster.json file.

    Returns
    -------
    dict
        Communication's time intervals specified in cluster.json file.
    """
    return get_cluster_items()['intervals']['communication']
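A short sketch of consuming one of these intervals; the 'timeout_cluster_request' key below comes from the cluster.json contents asserted in Example #6:

# Read the cluster request timeout (20 seconds in the configuration
# shown in Example #6) before issuing a request to another node.
comm_intervals = get_cluster_items_communication_intervals()
request_timeout = comm_intervals['timeout_cluster_request']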
Example #6
def test_get_cluster_items():
    """Verify the cluster files information."""
    utils.get_cluster_items.cache_clear()

    with patch('os.path.abspath', side_effect=FileNotFoundError):
        with pytest.raises(WazuhException, match='.* 3005 .*'):
            utils.get_cluster_items()

    items = utils.get_cluster_items()
    assert items == {'files': {'etc/': {'permissions': 416, 'source': 'master', 'files': ['client.keys'],
                                         'recursive': False, 'restart': False, 'remove_subdirs_if_empty': False,
                                         'extra_valid': False, 'description': 'client keys file database'},
                               'etc/shared/': {'permissions': 432, 'source': 'master', 'files': ['merged.mg'],
                                                'recursive': True, 'restart': False, 'remove_subdirs_if_empty': True,
                                                'extra_valid': False, 'description': 'shared configuration files'},
                               'var/multigroups/': {'permissions': 432, 'source': 'master', 'files': ['merged.mg'],
                                                     'recursive': True, 'restart': False,
                                                     'remove_subdirs_if_empty': True, 'extra_valid': False,
                                                     'description': 'shared configuration files'},
                               'etc/rules/': {'permissions': 432, 'source': 'master', 'files': ['all'],
                                               'recursive': True, 'restart': True, 'remove_subdirs_if_empty': False,
                                               'extra_valid': False, 'description': 'user rules'},
                               'etc/decoders/': {'permissions': 432, 'source': 'master', 'files': ['all'],
                                                  'recursive': True, 'restart': True, 'remove_subdirs_if_empty': False,
                                                  'extra_valid': False, 'description': 'user decoders'},
                               'etc/lists/': {'permissions': 432, 'source': 'master', 'files': ['all'],
                                               'recursive': True, 'restart': True, 'remove_subdirs_if_empty': False,
                                               'extra_valid': False, 'description': 'user CDB lists'},
                               'queue/agent-groups/': {'permissions': 432, 'source': 'master', 'files': ['all'],
                                                        'recursive': True, 'restart': False,
                                                        'remove_subdirs_if_empty': False, 'extra_valid': True,
                                                        'description': 'agents group configuration'},
                               'excluded_files': ['ar.conf', 'ossec.conf'],
                               'excluded_extensions': ['~', '.tmp', '.lock', '.swp']},
                     'intervals': {'worker': {'sync_integrity': 9, 'sync_agent_info': 10, 'sync_agent_info_ko_retry': 1,
                                              'keep_alive': 60, 'connection_retry': 10,
                                              'max_failed_keepalive_attempts': 2},
                                   'master': {'recalculate_integrity': 8, 'check_worker_lastkeepalive': 60,
                                              'max_allowed_time_without_keepalive': 120},
                                   'communication': {'timeout_cluster_request': 20, 'timeout_api_request': 200,
                                                     'timeout_api_exe': 10, 'timeout_receiving_file': 120}},
                     'sync_options': {'get_agentinfo_newer_than': 1800}, 'distributed_api': {'enabled': True}}
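The cache_clear() call at the start of this test implies that get_cluster_items is memoized. A minimal sketch of that pattern, assuming functools.lru_cache and an illustrative file path (the real Wazuh decorator and path are not shown in this section):

import json
from functools import lru_cache

@lru_cache(maxsize=None)
def get_cluster_items():
    # Parse cluster.json once and reuse the result; calling
    # get_cluster_items.cache_clear() forces the next call to re-read it.
    with open('cluster.json') as f:  # illustrative path
        return json.load(f)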
Example #7
def get_files_status(node_type, get_md5=True):
    cluster_items = get_cluster_items()

    final_items = {}
    for file_path, item in cluster_items['files'].items():
        if file_path == "excluded_files" or file_path == "excluded_extensions":
            continue

        if item['source'] == node_type or item['source'] == 'all':
            try:
                final_items.update(
                    walk_dir(file_path, item['recursive'], item['files'],
                             cluster_items['files']['excluded_files'],
                             cluster_items['files']['excluded_extensions'],
                             file_path, get_md5, node_type))
            except Exception as e:
                logger.warning("Error getting file status: {}.".format(e))

    return final_items
Example #8
        debug_mode = 0

    # set correct permissions on cluster.log file
    if os.path.exists('{0}/logs/cluster.log'.format(common.wazuh_path)):
        os.chown('{0}/logs/cluster.log'.format(common.wazuh_path),
                 common.wazuh_uid(), common.wazuh_gid())
        os.chmod('{0}/logs/cluster.log'.format(common.wazuh_path), 0o660)

    main_logger = set_logging(foreground_mode=args.foreground,
                              debug_mode=debug_mode)

    cluster_configuration = cluster_utils.read_config(
        config_file=args.config_file)
    if cluster_configuration['disabled']:
        sys.exit(0)
    cluster_items = cluster_utils.get_cluster_items()
    try:
        wazuh.core.cluster.cluster.check_cluster_config(cluster_configuration)
    except Exception as e:
        main_logger.error(e)
        sys.exit(1)

    if args.test_config:
        sys.exit(0)

    cluster_status = wazuh.core.cluster.utils.get_cluster_status()
    if cluster_status['running'] == 'yes':
        main_logger.error("Cluster is already running.")
        sys.exit(1)

    # clean
Example #9
def compare_files(good_files, check_files, node_name):
    """Compare metadata of the master files with metadata of files sent by a worker node.

    Compare the integrity information of each file of the master node against those in the worker node (listed in
    cluster.json), calculated in get_files_status(). The files are classified in four groups depending on the
    information of cluster.json: missing, extra, extra_valid and shared.

    Parameters
    ----------
    good_files : dict
        Paths (keys) and metadata (values) of the master's files.
    check_files : dict
        Paths (keys) and metadata (values) of the worker's files.
    node_name : str
        Name of the worker whose files are being compared.

    Returns
    -------
    files : dict
        Paths (keys) and metadata (values) of the files classified into four groups.
    count : int
        Number of files inside each classification.
    """
    def split_on_condition(seq, condition):
        """Split a sequence into two generators based on a condition.

        Parameters
        ----------
        seq : set
            Set of items to split.
        condition : callable
            Function to base the splitting on.

        Returns
        -------
        generator
            Items that meet the condition.
        generator
            Items that do not meet the condition.
        """
        l1, l2 = itertools.tee((condition(item), item) for item in seq)
        return (i for p, i in l1 if p), (i for p, i in l2 if not p)

    # Get 'files' dictionary inside cluster.json.
    cluster_items = get_cluster_items()['files']

    # Missing files will be the ones that are present in good files (master) but not in the check files (worker).
    missing_files = {
        key: good_files[key]
        for key in good_files.keys() - check_files.keys()
    }

    # Extra files are the ones present in check files (worker) but not in good files (master) and aren't extra valid.
    extra_valid, extra = split_on_condition(
        check_files.keys() - good_files.keys(), lambda x: cluster_items[
            check_files[x]['cluster_item_key']]['extra_valid'])
    extra_files = {key: check_files[key] for key in extra}
    extra_valid_files = {key: check_files[key] for key in extra_valid}

    # 'all_shared' files are the ones present in both sets but with different MD5 checksum.
    all_shared = [
        x for x in check_files.keys() & good_files.keys()
        if check_files[x]['md5'] != good_files[x]['md5']
    ]

    # 'shared_e_v' are files present in both nodes but need to be merged before sending them to the worker. Only
    # 'agent-groups' files fit into this category.
    # 'shared' files can be sent as is, without merging.
    shared_e_v, shared = split_on_condition(
        all_shared, lambda x: cluster_items[check_files[x]['cluster_item_key']]
        ['extra_valid'])
    shared_e_v = list(shared_e_v)
    if shared_e_v:
        # Merge all shared extra valid files into a single one. Create a tuple (merged_filepath, {metadata_dict}).
        shared_merged = [(merge_info(merge_type='agent-groups',
                                     files=shared_e_v,
                                     file_type='-shared',
                                     node_name=node_name,
                                     time_limit_seconds=0)[1], {
                                         'cluster_item_key':
                                         '/queue/agent-groups/',
                                         'merged': True,
                                         'merge-type': 'agent-groups'
                                     })]

        # Dict combining the merged entry created above with the remaining 'shared' filepaths (keys) and their master metadata (values).
        shared_files = dict(
            itertools.chain(shared_merged,
                            ((key, good_files[key]) for key in shared)))
    else:
        shared_files = {key: good_files[key] for key in shared}

    files = {
        'missing': missing_files,
        'extra': extra_files,
        'shared': shared_files,
        'extra_valid': extra_valid_files
    }
    count = {
        'missing': len(missing_files),
        'extra': len(extra_files),
        'extra_valid': len(extra_valid_files),
        'shared': len(all_shared)
    }

    return files, count
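A toy run of compare_files, reusing the 'etc/' entry from the cluster.json contents in Example #6 (its extra_valid flag is False, so a worker-only file under it is classified as 'extra'); all paths and MD5 values below are invented for illustration:

# Hypothetical snapshots shaped like get_files_status() output: each value
# carries an 'md5' and the cluster.json section it belongs to.
master_files = {
    'etc/client.keys': {'md5': 'aaa111', 'cluster_item_key': 'etc/'},
    'etc/master.only': {'md5': 'bbb222', 'cluster_item_key': 'etc/'},
}
worker_files = {
    'etc/client.keys': {'md5': 'ccc333', 'cluster_item_key': 'etc/'},  # MD5 differs -> 'shared'
    'etc/worker.only': {'md5': 'ddd444', 'cluster_item_key': 'etc/'},  # worker-only -> 'extra'
}
files, count = compare_files(master_files, worker_files, node_name='worker-01')
# count -> {'missing': 1, 'extra': 1, 'extra_valid': 0, 'shared': 1}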
Example #10
def get_cluster_items_worker_intervals():
    return get_cluster_items()['intervals']['worker']
Example #11
def get_cluster_items_communication_intervals():
    return get_cluster_items()['intervals']['communication']
Example #12
def get_cluster_items_master_intervals():
    return get_cluster_items()['intervals']['master']
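A brief sketch combining the interval getters; both keys referenced below appear in the 'master' intervals block asserted in Example #6:

# On the master: integrity is recalculated every 8 seconds and a worker is
# considered lost after 120 seconds without a keepalive (values per Example #6).
master_intervals = get_cluster_items_master_intervals()
recalc_every = master_intervals['recalculate_integrity']
max_silence = master_intervals['max_allowed_time_without_keepalive']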
Example #13
def compare_files(good_files, check_files, node_name):
    def split_on_condition(seq, condition):
        """
        Splits a sequence into two generators based on a condition.
        :param seq: sequence to split
        :param condition: function to base the splitting on
        :return: two generators
        """
        l1, l2 = itertools.tee((condition(item), item) for item in seq)
        return (i for p, i in l1 if p), (i for p, i in l2 if not p)

    cluster_items = get_cluster_items()['files']

    # missing files will be the ones that are present in good files but not in the check files
    missing_files = {
        key: good_files[key]
        for key in good_files.keys() - check_files.keys()
    }

    # extra files are the ones present in check files but not in good files and aren't extra valid
    extra_valid, extra = split_on_condition(
        check_files.keys() - good_files.keys(), lambda x: cluster_items[
            check_files[x]['cluster_item_key']]['extra_valid'])
    extra_files = {key: check_files[key] for key in extra}
    extra_valid_files = {key: check_files[key] for key in extra_valid}
    # shared files are the ones present in both sets but with different MD5 checksums.
    all_shared = [
        x for x in check_files.keys() & good_files.keys()
        if check_files[x]['md5'] != good_files[x]['md5']
    ]
    shared_e_v, shared = split_on_condition(
        all_shared, lambda x: cluster_items[check_files[x]['cluster_item_key']]
        ['extra_valid'])
    shared_e_v = list(shared_e_v)
    if shared_e_v:
        # merge all shared extra valid files into a single one.
        # To Do: if more extra valid files types are included, compute their merge type and remove hardcoded
        # agent-groups
        shared_merged = [(merge_agent_info(merge_type='agent-groups',
                                           files=shared_e_v,
                                           file_type='-shared',
                                           node_name=node_name,
                                           time_limit_seconds=0)[1], {
                                               'cluster_item_key':
                                               '/queue/agent-groups/',
                                               'merged': True,
                                               'merge-type': 'agent-groups'
                                           })]

        shared_files = dict(
            itertools.chain(shared_merged,
                            ((key, good_files[key]) for key in shared)))
    else:
        shared_files = {key: good_files[key] for key in shared}

    files = {
        'missing': missing_files,
        'extra': extra_files,
        'shared': shared_files,
        'extra_valid': extra_valid_files
    }
    count = {
        'missing': len(missing_files),
        'extra': len(extra_files),
        'extra_valid': len(extra_valid_files),
        'shared': len(all_shared)
    }

    return files, count