Example #1
    def get_health(self, filter_node) -> Dict:
        """Get nodes and synchronization information.

        Parameters
        ----------
        filter_node : dict
            Node(s) to filter by; if None or empty, health information for all nodes is returned.

        Returns
        -------
        dict
            Dict object containing nodes information.
        """
        workers_info = {key: val.to_dict() for key, val in self.clients.items()
                        if filter_node is None or filter_node == {} or key in filter_node}
        n_connected_nodes = len(workers_info)
        if filter_node is None or self.configuration['node_name'] in filter_node:
            workers_info.update({self.configuration['node_name']: self.to_dict()})

        # Get active agents by node and format the last keep alive date
        for node_name in workers_info.keys():
            workers_info[node_name]["info"]["n_active_agents"] = Agent.get_agents_overview(
                filters={'status': 'active', 'node_name': node_name})['totalItems']
            if workers_info[node_name]['info']['type'] != 'master':
                workers_info[node_name]['status']['last_keep_alive'] = str(
                    datetime.fromtimestamp(workers_info[node_name]['status']['last_keep_alive']))

        return {"n_connected_nodes": n_connected_nodes, "nodes": workers_info}
Example #2
    def get_health(self, filter_node) -> Dict:
        """
        Return healthcheck data

        :param filter_node: Node(s) to filter by; None or an empty dict returns all nodes
        :return: Dictionary with 'n_connected_nodes' and 'nodes'
        """
        workers_info = {
            key: val.to_dict()
            for key, val in self.clients.items()
            if filter_node is None or filter_node == {} or key in filter_node
        }
        n_connected_nodes = len(workers_info)
        if filter_node is None or self.configuration[
                'node_name'] in filter_node:
            workers_info.update(
                {self.configuration['node_name']: self.to_dict()})

        # Get active agents by node and format the last keep alive date
        for node_name in workers_info.keys():
            workers_info[node_name]["info"][
                "n_active_agents"] = Agent.get_agents_overview(
                    filters={
                        'status': 'active',
                        'node_name': node_name
                    })['totalItems']
            if workers_info[node_name]['info']['type'] != 'master':
                workers_info[node_name]['status']['last_keep_alive'] = str(
                    datetime.fromtimestamp(
                        workers_info[node_name]['status']['last_keep_alive']))

        return {"n_connected_nodes": n_connected_nodes, "nodes": workers_info}
Example #3
    def get_health(self, filter_node) -> Dict:
        """Get nodes and synchronization information.

        Parameters
        ----------
        filter_node : dict
            Node(s) to filter by; if None or empty, health information for all nodes is returned.

        Returns
        -------
        dict
            Dict object containing nodes information.
        """
        workers_info = {
            key: val.to_dict()
            for key, val in self.clients.items()
            if filter_node is None or filter_node == {} or key in filter_node
        }
        n_connected_nodes = len(workers_info)
        if filter_node is None or self.configuration[
                'node_name'] in filter_node:
            workers_info.update(
                {self.configuration['node_name']: self.to_dict()})

        # Get active agents by node and format the last keep alive date
        active_agents = Agent.get_agents_overview(filters={
            'status': 'active',
            'node_name': filter_node
        })['items']
        for agent in active_agents:
            if (agent_node := agent["node_name"]) in workers_info.keys():
                workers_info[agent_node]["info"]["n_active_agents"] = \
                    workers_info[agent_node]["info"].get("n_active_agents", 0) + 1

        return {"n_connected_nodes": n_connected_nodes, "nodes": workers_info}
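The counting loop in this variant is a plain aggregate-by-key; the same idea can be sketched with collections.Counter (the sample rows are invented, shaped like Agent.get_agents_overview(...)['items']):

from collections import Counter

active_agents = [
    {'id': '001', 'node_name': 'worker-01'},
    {'id': '002', 'node_name': 'worker-01'},
    {'id': '003', 'node_name': 'worker-02'},
]
per_node = Counter(agent['node_name'] for agent in active_agents)
print(per_node['worker-01'])  # 2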
Example #4
    def remove_bulk_agents(agent_ids_list: KeysView, logger):
        """
        Removes files created by agents in worker nodes. This function doesn't remove agents from client.keys since the
        client.keys file is overwritten by the master node.
        :param agent_ids_list: List of agents ids to remove.
        :param logger: Logger to use
        :return: None.
        """

        def remove_agent_file_type(agent_files: List[str]):
            """
            Removes files if they exist
            :param agent_files: Path patterns (format-string templates, globbed with '*') of the files to remove
            :return: None
            """
            for filetype in agent_files:

                filetype_glob = filetype.format(ossec_path=common.ossec_path, id='*', name='*', ip='*')
                filetype_agent = {filetype.format(ossec_path=common.ossec_path, id=a['id'], name=a['name'], ip=a['ip'])
                                  for a in agent_info}

                for agent_file in set(glob.iglob(filetype_glob)) & filetype_agent:
                    logger.debug2("Removing {}".format(agent_file))
                    if os.path.isdir(agent_file):
                        shutil.rmtree(agent_file)
                    else:
                        os.remove(agent_file)

        if not agent_ids_list:
            return  # the function doesn't make sense if there are no agents to remove

        logger.info("Removing files from {} agents".format(len(agent_ids_list)))
        logger.debug("Agents to remove: {}".format(', '.join(agent_ids_list)))
        # Remove agents in groups of 500 elements (so the wazuh-db socket is not saturated)
        for agents_ids_sublist in itertools.zip_longest(*itertools.repeat(iter(agent_ids_list), 500), fillvalue='0'):
            agents_ids_sublist = list(filter(lambda x: x != '0', agents_ids_sublist))
            # Get info from DB
            agent_info = Agent.get_agents_overview(q=",".join(["id={}".format(i) for i in agents_ids_sublist]),
                                                   select=['ip', 'id', 'name'], limit=None)['items']
            logger.debug2("Removing files from agents {}".format(', '.join(agents_ids_sublist)))

            files_to_remove = ['{ossec_path}/queue/rootcheck/({name}) {ip}->rootcheck',
                               '{ossec_path}/queue/diff/{name}', '{ossec_path}/queue/agent-groups/{id}',
                               '{ossec_path}/queue/rids/{id}',
                               '{ossec_path}/var/db/agents/{name}-{id}.db']
            remove_agent_file_type(files_to_remove)

            logger.debug2("Removing agent group assigments from database")
            # remove agent from groups
            wdb_conn = WazuhDBConnection()

            query_to_execute = 'global sql delete from belongs where {}'.format(' or '.join([
                'id_agent = {}'.format(agent_id) for agent_id in agents_ids_sublist
            ]))
            wdb_conn.run_wdb_command(query_to_execute)

        logger.info("Agent files removed")
Example #5
    async def process_files_from_worker(self, files_metadata: Dict,
                                        decompressed_files_path: str, logger):
        """Iterate over received files from worker and updates the local ones.

        Parameters
        ----------
        files_metadata : dict
            Dictionary containing file metadata (each key is a filepath and each value its metadata).
        decompressed_files_path : str
            Filepath of the decompressed received zipfile.
        logger : Logger object
            The logger to use.
        """
        async def update_file(name: str, data: Dict):
            """Update a local file with one received from a worker.

            The modification date is checked to decide whether to update it or not.

            Parameters
            ----------
            name : str
                Relative path of the file.
            data : dict
                Metadata of the file (MD5, merged, etc).
            """
            # Full path
            full_path, error_updating_file = os.path.join(
                common.wazuh_path, name), False

            try:
                # Only valid client.keys is the local one (master).
                if os.path.basename(name) == 'client.keys':
                    self.logger.warning(
                        "Client.keys received in a master node")
                    raise exception.WazuhClusterError(3007)

                # If the file is merged, create individual files from it.
                if data['merged']:
                    for file_path, file_data, file_time in wazuh.core.cluster.cluster.unmerge_info(
                            data['merge_type'], decompressed_files_path,
                            data['merge_name']):
                        # Destination path.
                        full_unmerged_name = os.path.join(
                            common.wazuh_path, file_path)
                        # Path where to create the file before moving it to the destination path (with safe_move).
                        tmp_unmerged_path = os.path.join(
                            common.wazuh_path, 'queue', 'cluster', self.name,
                            os.path.basename(file_path))

                        try:
                            agent_id = os.path.basename(file_path)
                            # If the agent does not exist on the master, do not copy its file from the worker.
                            if agent_id not in agent_ids:
                                n_errors['warnings'][data['cluster_item_key']] = 1 \
                                    if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                    else n_errors['warnings'][data['cluster_item_key']] + 1

                                self.logger.debug2(
                                    f"Received group of a non-existent agent '{agent_id}'"
                                )
                                continue

                            # Format the file_data specified inside the merged file.
                            try:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S')

                            # If the file already existed, check if it is older than the one to be copied from worker.
                            if os.path.isfile(full_unmerged_name):
                                local_mtime = datetime.utcfromtimestamp(
                                    int(os.stat(full_unmerged_name).st_mtime))
                                if local_mtime > mtime:
                                    logger.debug2(
                                        f"Receiving an old file ({file_path})")
                                    continue

                            # Create file in temporal path and safe move it to the destination path.
                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            utils.safe_move(
                                tmp_unmerged_path,
                                full_unmerged_name,
                                ownership=(common.ossec_uid(),
                                           common.ossec_gid()),
                                permissions=self.cluster_items['files'][
                                    data['cluster_item_key']]['permissions'],
                                time=(mtime_epoch, mtime_epoch))
                            self.integrity_sync_status[
                                'total_extra_valid'] += 1
                        except Exception as e:
                            self.logger.error(
                                f"Error updating agent group/status ({tmp_unmerged_path}): {e}"
                            )

                            n_errors['errors'][data['cluster_item_key']] = 1 \
                                if n_errors['errors'].get(data['cluster_item_key']) is None \
                                else n_errors['errors'][data['cluster_item_key']] + 1
                        await asyncio.sleep(0.0001)

                # If the file is not merged, move it directly to the destination path.
                else:
                    zip_path = os.path.join(decompressed_files_path, name)
                    utils.safe_move(zip_path,
                                    full_path,
                                    ownership=(common.ossec_uid(),
                                               common.ossec_gid()),
                                    permissions=self.cluster_items['files']
                                    [data['cluster_item_key']]['permissions'])

            except exception.WazuhException as e:
                logger.debug2(f"Warning updating file '{name}': {e}")
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2(f"Error updating file '{name}': {e}")
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(
                    data['cluster_item_key']) \
                    else n_errors[error_tag][data['cluster_item_key']] + 1

        n_errors = {'errors': {}, 'warnings': {}}

        # Get ID of all agents.
        try:
            agents = Agent.get_agents_overview(select=['name'],
                                               limit=None)['items']
            agent_ids = set(map(operator.itemgetter('id'), agents))
        except Exception as e:
            logger.debug2(f"Error getting agent ids: {e}")
            agent_ids = {}

        # Iterate and update each file specified in 'files_metadata' if conditions are met.
        try:
            for filename, data in files_metadata.items():
                await update_file(data=data, name=filename)
        except Exception as e:
            self.logger.error(f"Error updating worker files: '{e}'.")
            raise e

        # Log errors if any.
        if sum(n_errors['errors'].values()) > 0:
            logger.error("Errors updating worker files: {}".format(' | '.join([
                '{}: {}'.format(key, value)
                for key, value in n_errors['errors'].items()
            ])))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == 'queue/agent-groups/':
                    logger.debug2(
                        f"Received {value} group assignments for non-existent agents. Skipping."
                    )
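The two-step strptime inside update_file handles timestamps that may or may not carry microseconds. Isolated as a helper (the function name and sample strings are invented):

from datetime import datetime

def parse_merge_time(file_time: str) -> datetime:
    """Parse a merged-file timestamp with or without fractional seconds."""
    try:
        return datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        return datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

print(parse_merge_time('2021-03-01 10:15:30.123456'))
print(parse_merge_time('2021-03-01 10:15:30'))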
Example #6
    async def process_files_from_worker(self, files_checksums: Dict, decompressed_files_path: str, logger):
        """
        Iterates over files received from a worker and updates the local ones
        :param files_checksums: A dictionary containing file metadata
        :param decompressed_files_path: Filepath of the decompressed received zipfile
        :param logger: The logger to use
        :return: None
        """
        async def update_file(name: str, data: Dict):
            """
            Updates a file from the worker. It checks the modification date to decide whether to update it or not.
            If it's a merged file, it unmerges it.
            :param name: Filename to update
            :param data: File metadata
            :return: None
            """
            # Full path
            full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

            # Cluster items information: write mode and permissions
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                if os.path.basename(name) == 'client.keys':
                    self.logger.warning("Client.keys received in a master node")
                    raise exception.WazuhClusterError(3007)
                if data['merged']:
                    self.sync_extra_valid_status['total_extra_valid'] = len(agent_ids)
                    for file_path, file_data, file_time in wazuh.core.cluster.cluster.unmerge_info(data['merge_type'],
                                                                                                   decompressed_files_path,
                                                                                                   data['merge_name']):
                        full_unmerged_name = os.path.join(common.ossec_path, file_path)
                        tmp_unmerged_path = os.path.join(common.ossec_path, 'queue/cluster', self.name, os.path.basename(file_path))
                        try:
                            agent_id = os.path.basename(file_path)
                            if agent_id not in agent_ids:
                                n_errors['warnings'][data['cluster_item_key']] = 1 \
                                    if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                    else n_errors['warnings'][data['cluster_item_key']] + 1

                                self.logger.debug2("Received group of an non-existent agent '{}'".format(agent_id))
                                continue

                            try:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

                            if os.path.isfile(full_unmerged_name):

                                local_mtime = datetime.utcfromtimestamp(int(os.stat(full_unmerged_name).st_mtime))
                                # check if the date is older than the manager's date
                                if local_mtime > mtime:
                                    logger.debug2("Receiving an old file ({})".format(file_path))
                                    continue

                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            utils.safe_move(tmp_unmerged_path, full_unmerged_name,
                                            ownership=(common.ossec_uid(), common.ossec_gid()),
                                            permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                                            time=(mtime_epoch, mtime_epoch)
                                            )
                        except Exception as e:
                            self.logger.error("Error updating agent group/status ({}): {}".format(tmp_unmerged_path, e))
                            self.sync_extra_valid_status['total_extra_valid'] -= 1

                            n_errors['errors'][data['cluster_item_key']] = 1 \
                                if n_errors['errors'].get(data['cluster_item_key']) is None \
                                else n_errors['errors'][data['cluster_item_key']] + 1
                        await asyncio.sleep(0.0001)

                else:
                    zip_path = "{}{}".format(decompressed_files_path, name)
                    utils.safe_move(zip_path, full_path,
                                    ownership=(common.ossec_uid(), common.ossec_gid()),
                                    permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions']
                                    )

            except exception.WazuhException as e:
                logger.debug2("Warning updating file '{}': {}".format(name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("Error updating file '{}': {}".format(name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(
                    data['cluster_item_key']) \
                    else n_errors[error_tag][data['cluster_item_key']] + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

        # tmp path
        tmp_path = "/queue/cluster/{}/tmp_files".format(self.name)
        n_merged_files = 0
        n_errors = {'errors': {}, 'warnings': {}}

        # create temporary directory for lock files
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            utils.mkdir_with_mode(lock_directory)

        try:
            agents = Agent.get_agents_overview(select=['name'], limit=None)['items']
            agent_ids = set(map(operator.itemgetter('id'), agents))
        except Exception as e:
            logger.debug2("Error getting agent ids: {}".format(e))
            agent_ids = {}

        try:
            for filename, data in files_checksums.items():
                await update_file(data=data, name=filename)
        except Exception as e:
            self.logger.error("Error updating worker files: '{}'.".format(e))
            raise e

        if sum(n_errors['errors'].values()) > 0:
            logger.error("Errors updating worker files: {}".format(' | '.join(
                ['{}: {}'.format(key, value) for key, value
                 in n_errors['errors'].items()])
            ))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == '/queue/agent-groups/':
                    logger.debug2("Received {} group assignments for non-existent agents. Skipping.".format(value))
Example #7
    def remove_bulk_agents(agent_ids_list: KeysView, logger):
        """
        Removes files created by agents in worker nodes. This function doesn't remove agents from client.keys since the
        client.keys file is overwritten by the master node.
        :param agent_ids_list: List of agents ids to remove.
        :param logger: Logger to use
        :return: None.
        """
        def remove_agent_file_type(agent_files: List[str]):
            """
            Removes files if they exist
            :param agent_files: Path patterns (format-string templates, globbed with '*') of the files to remove
            :return: None
            """
            for filetype in agent_files:

                filetype_glob = filetype.format(ossec_path=common.ossec_path,
                                                id='*',
                                                name='*',
                                                ip='*')
                filetype_agent = {
                    filetype.format(ossec_path=common.ossec_path,
                                    id=a['id'],
                                    name=a['name'],
                                    ip=a['ip'])
                    for a in agent_info
                }

                for agent_file in set(
                        glob.iglob(filetype_glob)) & filetype_agent:
                    logger.debug2("Removing {}".format(agent_file))
                    if os.path.isdir(agent_file):
                        shutil.rmtree(agent_file)
                    else:
                        os.remove(agent_file)

        if not agent_ids_list:
            return  # the function doesn't make sense if there are no agents to remove

        logger.info("Removing files from {} agents".format(
            len(agent_ids_list)))
        logger.debug("Agents to remove: {}".format(', '.join(agent_ids_list)))
        # the agents must be removed in groups of 997: 999 is the limit of SQL variables per query. Limit and offset are
        # always included in the SQL query, so that leaves 997 variables as limit.
        for agents_ids_sublist in itertools.zip_longest(*itertools.repeat(
                iter(agent_ids_list), 997),
                                                        fillvalue='0'):
            agents_ids_sublist = list(
                filter(lambda x: x != '0', agents_ids_sublist))
            # Get info from DB
            agent_info = Agent.get_agents_overview(q=",".join(
                ["id={}".format(i) for i in agents_ids_sublist]),
                                                   select=['ip', 'id', 'name'],
                                                   limit=None)['items']
            logger.debug2("Removing files from agents {}".format(
                ', '.join(agents_ids_sublist)))

            files_to_remove = [
                '{ossec_path}/queue/agent-info/{name}-{ip}',
                '{ossec_path}/queue/rootcheck/({name}) {ip}->rootcheck',
                '{ossec_path}/queue/diff/{name}',
                '{ossec_path}/queue/agent-groups/{id}',
                '{ossec_path}/queue/rids/{id}',
                '{ossec_path}/var/db/agents/{name}-{id}.db'
            ]
            remove_agent_file_type(files_to_remove)

            logger.debug2("Removing agent group assigments from database")
            # remove agent from groups
            db_global = glob.glob(common.database_path_global)
            if not db_global:
                raise WazuhInternalError(1600)

            conn = Connection(db_global[0])
            agent_ids_db = {
                'id_agent{}'.format(i): int(i)
                for i in agents_ids_sublist
            }
            conn.execute(
                'delete from belongs where {}'.format(' or '.join([
                    'id_agent = :{}'.format(i) for i in agent_ids_db.keys()
                ])), agent_ids_db)
            conn.commit()

            # Tell wazuh-db to delete the agent databases
            wdb_conn = WazuhDBConnection()
            wdb_conn.delete_agents_db(agents_ids_sublist)

        logger.info("Agent files removed")
Example #8
#    - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/var/ossec/framework/lib

from sys import path, exit
import json
# cwd = /var/ossec/api/framework/examples
#framework_path = '{0}'.format(path[0][:-9])
# cwd = /var/ossec/api
#framework_path = '{0}/framework'.format(path[0])
# Default path
framework_path = '/var/ossec/api/framework'
path.append(framework_path)

try:
    from wazuh import Wazuh
    from wazuh.core.agent import Agent
except Exception as e:
    print("No module 'wazuh' found: {}".format(e))
    exit(1)

if __name__ == "__main__":

    # Creating wazuh object
    # It is possible to specify the ossec path (path argument) or read it from /etc/ossec-init.conf (get_init argument)
    print("\nWazuh:")
    myWazuh = Wazuh()
    print(myWazuh)

    print("\nAgents:")
    agents = Agent.get_agents_overview()
    print(json.dumps(agents, indent=4, sort_keys=True))
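As consumed throughout these examples, the result of Agent.get_agents_overview is a dict with at least 'items' (a list of agent dicts) and 'totalItems'. A short, hedged sketch of reading that shape (the keyword values are illustrative):

# Assumed result shape, as used in the examples above.
overview = Agent.get_agents_overview(select=['id', 'name'], limit=None)
print(overview['totalItems'])
for agent in overview['items']:
    print(agent['id'], agent.get('name'))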