Example #1
def check_cluster_config(config):
    iv = InputValidator()
    reserved_ips = {'localhost', 'NODE_IP', '0.0.0.0', '127.0.1.1'}

    if len(config['key']) == 0:
        raise WazuhException(3004, 'Unspecified key')
    elif not iv.check_name(config['key']) or not iv.check_length(
            config['key'], 32, eq):  # 'eq' is presumably operator.eq (exact-length check)
        raise WazuhException(
            3004,
            'Key must be 32 characters long and only have alphanumeric characters'
        )

    elif config['node_type'] != 'master' and config['node_type'] != 'worker':
        raise WazuhException(
            3004,
            'Invalid node type {0}. Correct values are master and worker'.
            format(config['node_type']))

    elif not 1024 < config['port'] < 65535:
        raise WazuhException(
            3004, "Port must be higher than 1024 and lower than 65535.")

    if len(config['nodes']) > 1:
        logger.warning(
            "Found more than one node in configuration. Only master node should be specified. Using {} as master."
            .format(config['nodes'][0]))

    invalid_elements = list(reserved_ips & set(config['nodes']))

    if len(invalid_elements) != 0:
        raise WazuhException(
            3004, "Invalid elements in node fields: {0}.".format(
                ', '.join(invalid_elements)))
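
A minimal usage sketch (the values below are hypothetical; 'key', 'node_type', 'port' and 'nodes' are the only fields the function reads):

config = {
    'key': 'a' * 32,           # 32 alphanumeric characters
    'node_type': 'master',     # must be 'master' or 'worker'
    'port': 1516,              # must satisfy 1024 < port < 65535
    'nodes': ['192.168.0.10']  # reserved addresses such as 'localhost' are rejected
}
check_cluster_config(config)   # raises WazuhException(3004) on any invalid field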
Example #2
def compress_files(name, list_path, cluster_control_json=None):
    zip_file_path = "{0}/queue/cluster/{1}/{1}-{2}-{3}.zip".format(
        common.ossec_path, name, time(),
        str(random())[2:])
    if not os.path.exists(os.path.dirname(zip_file_path)):
        mkdir_with_mode(os.path.dirname(zip_file_path))
    with zipfile.ZipFile(zip_file_path, 'x') as zf:
        # write files
        if list_path:
            for f in list_path:
                try:
                    zf.write(filename=common.ossec_path + f, arcname=f)
                except zipfile.LargeZipFile as e:
                    raise WazuhException(3001, str(e))
                except Exception as e:
                    logger.error("[Cluster] {}".format(
                        str(WazuhException(3001, str(e)))))

        try:
            zf.writestr("cluster_control.json",
                        json.dumps(cluster_control_json))
        except Exception as e:
            raise WazuhException(3001, str(e))

    return zip_file_path
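
An illustrative round-trip check of the artifact produced above ('etc/ossec.conf' stands in for any path relative to common.ossec_path):

import json
import zipfile

path = compress_files('worker1', ['etc/ossec.conf'], cluster_control_json={})
with zipfile.ZipFile(path) as zf:
    assert 'cluster_control.json' in zf.namelist()
    print(json.loads(zf.read('cluster_control.json')))  # -> {}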
Example #3
    async def execute(self, command: bytes, data: bytes,
                      wait_for_complete: bool) -> str:
        """
        Sends a distributed API request and wait for a response in command dapi_res. Methods here are the same
        as the ones defined in LocalServerHandlerMaster.

        :param command: Command to execute
        :param data: Data to send
        :param wait_for_complete: Raise a timeout exception or not
        :return: The request response
        """
        request_id = str(random.randint(0, 2**10 - 1))
        # create an event to wait for the response
        self.server.pending_api_requests[request_id] = {
            'Event': asyncio.Event(),
            'Response': ''
        }

        if command == b'dapi_forward':
            client, request = data.split(b' ', 1)
            client = client.decode()
            if client == 'fw_all_nodes':
                for worker in self.server.clients.values():
                    result = (await worker.send_request(
                        b'dapi',
                        request_id.encode() + b' ' + request)).decode()
            elif client in self.server.clients:
                result = (await self.server.clients[client].send_request(
                    b'dapi',
                    request_id.encode() + b' ' + request)).decode()
            else:
                raise WazuhException(3022, client)
        else:
            result = (await self.send_request(
                b'dapi',
                request_id.encode() + b' ' + data)).decode()

        if result.startswith('Error'):
            request_result = json.dumps({'error': 3009, 'message': result})
        else:
            if command == b'dapi' or command == b'dapi_forward':
                try:
                    timeout = None if wait_for_complete \
                                   else self.cluster_items['intervals']['communication']['timeout_api_request']
                    await asyncio.wait_for(
                        self.server.pending_api_requests[request_id]
                        ['Event'].wait(),
                        timeout=timeout)
                    request_result = self.server.pending_api_requests[
                        request_id]['Response']
                except asyncio.TimeoutError:
                    request_result = json.dumps({
                        'error': 3000,
                        'message': 'Timeout exceeded'
                    })
            else:
                request_result = result
        return request_result
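
The coroutine above parks each request in pending_api_requests under a random id and blocks on an asyncio.Event. A sketch of the resolving side, with hypothetical names (the actual Wazuh handler is not shown in these examples):

def process_dapi_res(self, data: bytes):
    # Hypothetical counterpart: whatever receives the 'dapi_res' answer stores
    # the payload and wakes the coroutine waiting in execute().
    request_id, response = data.decode().split(' ', 1)
    pending = self.server.pending_api_requests.get(request_id)
    if pending is not None:
        pending['Response'] = response  # read back by execute() after wait()
        pending['Event'].set()          # unblocks asyncio.wait_for(...)
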
Example #4
def test_results_AffectedItemsWazuhResult_encode_decode_json(get_wazuh_affected_item):
    """Test methods `encode_json` and `decode_json` from class `AffectedItemsWazuhResult`."""
    param_list = [['001', '002'], 2, None, ['int'], [True, True], 'Sample message', 'Sample message', 'Sample message']
    affected_result = get_wazuh_affected_item(param_list)
    affected_result.add_failed_item(id_=FAILED_AGENT_ID, error=WazuhException(WAZUH_EXCEPTION_CODE))
    # Use a complete AffectedItemsWazuhResult to encode a json and then decode it
    json_item = affected_result.encode_json()
    decoded_json = AffectedItemsWazuhResult.decode_json(json_item)
    assert affected_result == decoded_json
Example #5
def test_results_AffectedItemsWazuhResult_add_failed_item():
    """Test method `add_failed_item` from class `AffectedItemsWazuhResult`."""
    affected_result = AffectedItemsWazuhResult()
    id_list = ['001', '002']
    # Add two failed items with different id but same exception
    for agent_id in id_list:
        affected_result.add_failed_item(id_=agent_id, error=WazuhException(WAZUH_EXCEPTION_CODE))

    assert affected_result.failed_items
    assert set(id_list) == next(iter(affected_result.failed_items.values()))
Example #6
def test_read_empty_configuration():
    """
    Test reading an empty cluster configuration
    """
    with patch('wazuh.core.cluster.utils.get_ossec_conf') as m:
        m.side_effect = WazuhException(1106)
        configuration = wazuh.core.cluster.utils.read_config()
        configuration['disabled'] = 'yes' if configuration['disabled'] else 'no'
        assert configuration == default_cluster_configuration['cluster']
Example #7
def test_results_AffectedItemsWazuhResult_message_property():
    """Test `message` property from class `AffectedItemsWazuhResult`."""
    messages = {'none_msg': 'none_msg', 'all_msg': 'all_msg', 'some_msg': 'some_msg'}
    # Force every possible case since this property returns a different value depending on affected_items
    none_msg_result = AffectedItemsWazuhResult(**messages).message
    all_msg_result = AffectedItemsWazuhResult(**messages, affected_items=['001']).message
    aux_result = AffectedItemsWazuhResult(**messages, affected_items=['001'])
    aux_result.add_failed_item(error=WazuhException(WAZUH_EXCEPTION_CODE))
    some_msg_result = aux_result.message
    assert messages
    assert len(messages) == 3
    assert all(msg == item_msg for msg, item_msg in zip(messages, [none_msg_result, all_msg_result, some_msg_result]))
Example #8
def test_results_AffectedItemsWazuhResult_render(get_wazuh_affected_item):
    """Test method `render` from class `AffectedItemsWazuhResult`."""
    param_list = [['001', '002'], 2, None, ['int'], [True, True], 'Sample message', 'Sample message', 'Sample message']
    affected_result = get_wazuh_affected_item(param_list)
    for agent_id in [FAILED_AGENT_ID, 'Invalid ID']:
        affected_result.add_failed_item(id_=agent_id, error=WazuhException(WAZUH_EXCEPTION_CODE))
    # Render a valid AffectedItemsWazuhResult and check it has all the expected fields
    render_result = affected_result.render()
    assert isinstance(render_result, dict)
    assert render_result
    assert all(field in ['data', 'message'] for field in render_result)
    assert render_result['data']
    assert all(field in ['affected_items', 'total_affected_items', 'total_failed_items', 'failed_items']
               for field in render_result['data'])
Example #9
def compress_files(name, list_path, cluster_control_json=None):
    """Create a zip with cluster_control.json and the files listed in list_path.

    Iterate over the list of files and group them in the zip. If a file does not
    exist, the cluster_control_json dictionary is updated.

    Parameters
    ----------
    name : str
        Name of the node to which the zip will be sent.
    list_path : list
        List of file paths to be zipped.
    cluster_control_json : dict
        KO files (path-metadata) to be zipped as a json.

    Returns
    -------
    zip_file_path : str
        Path where the zip file has been saved.
    """
    failed_files = list()
    zip_file_path = "{0}/queue/cluster/{1}/{1}-{2}-{3}.zip".format(
        common.ossec_path, name, time(),
        str(random())[2:])
    if not os.path.exists(os.path.dirname(zip_file_path)):
        mkdir_with_mode(os.path.dirname(zip_file_path))
    with zipfile.ZipFile(zip_file_path, 'x') as zf:
        # write files
        if list_path:
            for f in list_path:
                try:
                    zf.write(filename=common.ossec_path + f, arcname=f)
                except zipfile.LargeZipFile as e:
                    raise WazuhError(3001, str(e))
                except Exception as e:
                    logger.debug("[Cluster] {}".format(
                        str(WazuhException(3001, str(e)))))
                    failed_files.append(f)
        try:
            if cluster_control_json and failed_files:
                update_cluster_control_with_failed(failed_files,
                                                   cluster_control_json)
            zf.writestr("cluster_control.json",
                        json.dumps(cluster_control_json))
        except Exception as e:
            raise WazuhError(3001, str(e))

    return zip_file_path
Example #10
        async def update_file(name: str, data: Dict):
            """
            Updates a file from the worker. It checks the modification date to decide whether to update it or not.
            If it's a merged file, it unmerges it.
            :param name: Filename to update
            :param data: File metadata
            :return: None
            """
            # Full path
            full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

            # Cluster items information: write mode and permissions
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                if os.path.basename(name) == 'client.keys':
                    self.logger.warning("Client.keys received in a master node")
                    raise WazuhException(3007)
                if data['merged']:
                    is_agent_info = data['merge_type'] == 'agent-info'
                    if is_agent_info:
                        self.sync_agent_info_status['total_agent_info'] = len(agent_ids)
                    else:
                        self.sync_extra_valid_status['total_extra_valid'] = len(agent_ids)
                    for file_path, file_data, file_time in cluster.unmerge_agent_info(data['merge_type'],
                                                                                      decompressed_files_path,
                                                                                      data['merge_name']):
                        full_unmerged_name = os.path.join(common.ossec_path, file_path)
                        tmp_unmerged_path = os.path.join(common.ossec_path, 'queue/cluster', self.name, os.path.basename(file_path))
                        try:
                            if is_agent_info:
                                agent_name_re = re.match(r'(^.+)-(.+)$', os.path.basename(file_path))
                                agent_name = agent_name_re.group(1) if agent_name_re else os.path.basename(file_path)
                                if agent_name not in agent_names:
                                    n_errors['warnings'][data['cluster_item_key']] = \
                                        n_errors['warnings'].get(data['cluster_item_key'], 0) + 1

                                    self.logger.debug2("Received status of an non-existent agent '{}'".format(agent_name))
                                    continue
                            else:
                                agent_id = os.path.basename(file_path)
                                if agent_id not in agent_ids:
                                    n_errors['warnings'][data['cluster_item_key']] = \
                                        n_errors['warnings'].get(data['cluster_item_key'], 0) + 1

                                    self.logger.debug2("Received group of an non-existent agent '{}'".format(agent_id))
                                    continue

                            try:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

                            if os.path.isfile(full_unmerged_name):

                                local_mtime = datetime.utcfromtimestamp(int(os.stat(full_unmerged_name).st_mtime))
                                # check if the date is older than the manager's date
                                if local_mtime > mtime:
                                    logger.debug2("Receiving an old file ({})".format(file_path))
                                    continue

                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            utils.safe_move(tmp_unmerged_path, full_unmerged_name,
                                            ownership=(common.ossec_uid(), common.ossec_gid()),
                                            permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                                            time=(mtime_epoch, mtime_epoch)
                                            )
                        except Exception as e:
                            self.logger.error("Error updating agent group/status ({}): {}".format(tmp_unmerged_path, e))
                            if is_agent_info:
                                self.sync_agent_info_status['total_agent_info'] -= 1
                            else:
                                self.sync_extra_valid_status['total_extra_valid'] -= 1

                            n_errors['errors'][data['cluster_item_key']] = \
                                n_errors['errors'].get(data['cluster_item_key'], 0) + 1
                        await asyncio.sleep(0.0001)

                else:
                    zip_path = "{}{}".format(decompressed_files_path, name)
                    utils.safe_move(zip_path, full_path,
                                    ownership=(common.ossec_uid(), common.ossec_gid()),
                                    permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions']
                                    )

            except WazuhException as e:
                logger.debug2("Warning updating file '{}': {}".format(name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("Error updating file '{}': {}".format(name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = \
                    n_errors[error_tag].get(data['cluster_item_key'], 0) + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()
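
The lock discipline above (open a companion .lock file, take an exclusive lockf, do the work, release) is a standard pattern. A minimal standalone sketch of the same idea, independent of the Wazuh internals, releasing in a finally block so the lock cannot leak on an unexpected exception:

import fcntl

def with_file_lock(lock_path, critical_section):
    lock_file = open(lock_path, 'a+')
    try:
        fcntl.lockf(lock_file, fcntl.LOCK_EX)  # blocks until the lock is granted
        return critical_section()
    finally:
        fcntl.lockf(lock_file, fcntl.LOCK_UN)  # always released
        lock_file.close()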
Example #11
def get_wazuh_failed_item():
    item = AffectedItemsWazuhResult()
    item.add_failed_item(id_=FAILED_AGENT_ID, error=WazuhException(WAZUH_EXCEPTION_CODE))
    return item
Example #12
    failed_item = get_wazuh_failed_item  # pytest fixture value (see Example #11)

    # Expect 'affected_items': ['001', '002', '003']
    or_result_1 = affected_item_1 | affected_item_2
    assert set(agent_list_1 + agent_list_2) == set(or_result_1.affected_items)
    assert not or_result_1.failed_items

    # Expect new failed_item
    or_result_2 = or_result_1 | failed_item
    assert or_result_2.failed_items == failed_item.failed_items


@pytest.mark.parametrize('or_item, expected_result', [
    (WazuhError(WAZUH_EXCEPTION_CODE, ids=['001']), AffectedItemsWazuhResult),
    (WazuhError(WAZUH_EXCEPTION_CODE), WazuhException),
    (WazuhException(WAZUH_EXCEPTION_CODE), WazuhException),
    ({'Invalid type': None}, None)
])
def test_results_AffectedItemsWazuhResult___or___exceptions(or_item, expected_result):
    """Test raised exceptions from method `__or__` from class `AffectedItemsWazuhResult`."""
    affected_result = AffectedItemsWazuhResult()
    # Force an exception trying to use __or__ with an invalid type
    try:
        or_result = affected_result | or_item
        assert isinstance(or_result, expected_result)
    except WazuhException as e:
        if e.code != 1000:
            raise e


def test_results_AffectedItemsWazuhResult_to_dict():
Example #13
def walk_dir(dirname,
             recursive,
             files,
             excluded_files,
             excluded_extensions,
             get_cluster_item_key,
             get_md5=True,
             whoami='master'):
    walk_files = {}

    try:
        entries = listdir(common.ossec_path + dirname)
    except OSError as e:
        raise WazuhException(3015, str(e))

    for entry in entries:
        if entry in excluded_files or any(
                entry.endswith(ext) for ext in excluded_extensions):
            continue

        try:
            full_path = path.join(dirname, entry)
            if entry in files or files == ["all"]:

                if not path.isdir(common.ossec_path + full_path):
                    file_mod_time = datetime.utcfromtimestamp(
                        stat(common.ossec_path + full_path).st_mtime)

                    if whoami == 'worker' and file_mod_time < (
                            datetime.utcnow() - timedelta(minutes=30)):
                        continue

                    entry_metadata = {
                        "mod_time": str(file_mod_time),
                        'cluster_item_key': get_cluster_item_key
                    }
                    if '.merged' in entry:
                        entry_metadata['merged'] = True
                        entry_metadata['merge_type'] = 'agent-info' if 'agent-info' in entry else 'agent-groups'
                        entry_metadata['merge_name'] = dirname + '/' + entry
                    else:
                        entry_metadata['merged'] = False

                    if get_md5:
                        entry_metadata['md5'] = md5(common.ossec_path +
                                                    full_path)

                    walk_files[full_path] = entry_metadata

            if recursive and path.isdir(common.ossec_path + full_path):
                walk_files.update(
                    walk_dir(full_path, recursive, files, excluded_files,
                             excluded_extensions, get_cluster_item_key,
                             get_md5, whoami))

        except Exception as e:
            logger.error("Could not get checksum of file {}: {}".format(
                entry, e))

    return walk_files
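
A hypothetical invocation mirroring the signature above: walk 'etc/shared' recursively, hash every file, and skip temporary files:

shared_files = walk_dir('etc/shared', recursive=True, files=['all'],
                        excluded_files=['ar.conf'], excluded_extensions=['.tmp'],
                        get_cluster_item_key='etc/shared', get_md5=True,
                        whoami='master')
# -> {'etc/shared/<name>': {'mod_time': ..., 'cluster_item_key': ..., 'merged': False, 'md5': ...}, ...}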
Example #14
        def update_file(name, data):
            # Full path
            full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

            # Cluster items information: write mode and permissions
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(
                common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                if os.path.basename(name) == 'client.keys':
                    self.logger.warning(
                        "Client.keys received in a master node")
                    raise WazuhException(3007)
                if data['merged']:
                    is_agent_info = data['merge_type'] == 'agent-info'
                    if is_agent_info:
                        self.sync_agent_info_status['total_agent_info'] = len(
                            agent_ids)
                    else:
                        self.sync_extra_valid_status[
                            'total_extra_valid'] = len(agent_ids)
                    for file_path, file_data, file_time in cluster.unmerge_agent_info(
                            data['merge_type'], decompressed_files_path,
                            data['merge_name']):
                        try:
                            full_unmerged_name = common.ossec_path + file_path
                            tmp_unmerged_path = full_unmerged_name + '.tmp'
                            if is_agent_info:
                                agent_name_re = re.match(
                                    r'(^.+)-(.+)$',
                                    os.path.basename(file_path))
                                agent_name = agent_name_re.group(
                                    1) if agent_name_re else os.path.basename(
                                        file_path)
                                if agent_name not in agent_names:
                                    n_errors['warnings'][data['cluster_item_key']] = \
                                        n_errors['warnings'].get(data['cluster_item_key'], 0) + 1

                                    self.logger.debug2(
                                        "Received status of a non-existent agent '{}'"
                                        .format(agent_name))
                                    continue
                            else:
                                agent_id = os.path.basename(file_path)
                                if agent_id not in agent_ids:
                                    n_errors['warnings'][data['cluster_item_key']] = \
                                        n_errors['warnings'].get(data['cluster_item_key'], 0) + 1

                                    self.logger.debug2(
                                        "Received group of a non-existent agent '{}'"
                                        .format(agent_id))
                                    continue

                            try:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S')

                            if os.path.isfile(full_unmerged_name):

                                local_mtime = datetime.utcfromtimestamp(
                                    int(os.stat(full_unmerged_name).st_mtime))
                                # check if the date is older than the manager's date
                                if local_mtime > mtime:
                                    logger.debug2(
                                        "Receiving an old file ({})".format(
                                            file_path))
                                    continue  # skip this file only (the coroutine version does the same)

                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            os.utime(
                                tmp_unmerged_path,
                                (mtime_epoch, mtime_epoch))  # (atime, mtime)
                            os.chown(tmp_unmerged_path, common.ossec_uid,
                                     common.ossec_gid)
                            os.chmod(
                                tmp_unmerged_path, self.cluster_items['files'][
                                    data['cluster_item_key']]['permissions'])
                            os.rename(tmp_unmerged_path, full_unmerged_name)
                        except Exception as e:
                            self.logger.debug2(
                                "Error updating agent group/status: {}".format(
                                    e))
                            if is_agent_info:
                                self.sync_agent_info_status[
                                    'total_agent_info'] -= 1
                            else:
                                self.sync_extra_valid_status[
                                    'total_extra_valid'] -= 1

                            n_errors['errors'][data['cluster_item_key']] = \
                                n_errors['errors'].get(data['cluster_item_key'], 0) + 1

                else:
                    zip_path = "{}{}".format(decompressed_files_path, name)
                    os.chown(zip_path, common.ossec_uid, common.ossec_gid)
                    os.chmod(
                        zip_path, self.cluster_items['files'][
                            data['cluster_item_key']]['permissions'])
                    os.rename(zip_path, full_path)

            except WazuhException as e:
                logger.debug2("Warning updating file '{}': {}".format(name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("Error updating file '{}': {}".format(name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = \
                    n_errors[error_tag].get(data['cluster_item_key'], 0) + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()
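
Example #14 is the synchronous predecessor of the coroutine in Example #10: the newer code delegates the temp-write, utime, chown, chmod and rename steps to utils.safe_move. Read side by side, safe_move appears to bundle roughly the following (an inference from these two examples, not safe_move's actual source):

import os

def safe_move_sketch(src, dst, ownership, permissions, time=None):
    if time is not None:
        os.utime(src, time)        # (atime, mtime) tuple
    os.chown(src, *ownership)      # (uid, gid)
    os.chmod(src, permissions)
    os.rename(src, dst)            # atomic within the same filesystem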