Example 1
        def overwrite_or_create_files(filename: str, data: Dict):
            """
            Updates a file coming from the master
            :param filename: Filename to update
            :param data: File metadata such as modification time, whether it's a merged file or not, etc.
            :return: None
            """
            full_filename_path = common.ossec_path + filename
            if os.path.basename(filename) == 'client.keys':
                self._check_removed_agents("{}{}".format(zip_path, filename),
                                           logger)

            if data['merged']:  # worker nodes can only receive agent-groups files
                if data['merge-type'] == 'agent-info':
                    logger.warning("Agent status received in a worker node")
                    raise WazuhInternalError(3011)

                for name, content, _ in wazuh.core.cluster.cluster.unmerge_agent_info(
                        'agent-groups', zip_path, filename):
                    full_unmerged_name = os.path.join(common.ossec_path, name)
                    tmp_unmerged_path = full_unmerged_name + '.tmp'
                    with open(tmp_unmerged_path, 'wb') as f:
                        f.write(content)
                    safe_move(tmp_unmerged_path,
                              full_unmerged_name,
                              permissions=self.cluster_items['files'][
                                  data['cluster_item_key']]['permissions'],
                              ownership=(common.ossec_uid(),
                                         common.ossec_gid()))
            else:
                if not os.path.exists(os.path.dirname(full_filename_path)):
                    utils.mkdir_with_mode(os.path.dirname(full_filename_path))
                safe_move("{}{}".format(zip_path, filename),
                          full_filename_path,
                          permissions=self.cluster_items['files'][
                              data['cluster_item_key']]['permissions'],
                          ownership=(common.ossec_uid(), common.ossec_gid()))
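Both branches above publish files through safe_move: content is first placed under a temporary name and only then renamed over the destination, so readers never observe a half-written file. Below is a minimal sketch of that pattern. The real helper lives in wazuh.core.utils and is not shown in these examples; the signature and the 0o660 default are assumptions for illustration only.

import os
import shutil


def safe_move_sketch(src, dst, permissions=0o660, ownership=None, time=None):
    # The parameter names and the 0o660 default are assumptions for this sketch.
    tmp_dst = dst + '.tmp'
    shutil.move(src, tmp_dst)          # Bring the content onto the target filesystem.
    if ownership is not None:
        os.chown(tmp_dst, *ownership)  # Apply (uid, gid) before exposing the file.
    os.chmod(tmp_dst, permissions)
    if time is not None:
        os.utime(tmp_dst, time)        # Preserve (atime, mtime) when requested.
    os.rename(tmp_dst, dst)            # Atomic on POSIX within a single filesystem.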
Example 2
        def overwrite_or_create_files(filename: str, data: Dict):
            """Update a file coming from the master.

            Move a file from the unzipped directory received from the master to the path
            specified in 'filename'. If the file is of 'merged' type, it is first split into
            individual files, which are then moved to their final directories.

            Parameters
            ----------
            filename : str
                Filename inside unzipped dir to update.
            data : dict
                File metadata such as modification time, whether it's a merged file or not, etc.
            """
            full_filename_path = os.path.join(common.wazuh_path, filename)

            if data['merged']:  # worker nodes can only receive agent-groups files
                # Split merged file into individual files inside zipdir (directory containing unzipped files),
                # and then move each one to the destination directory (<wazuh_path>/filename).
                for name, content, _ in cluster.unmerge_info('agent-groups', zip_path, filename):
                    full_unmerged_name = os.path.join(common.wazuh_path, name)
                    tmp_unmerged_path = full_unmerged_name + '.tmp'
                    with open(tmp_unmerged_path, 'wb') as f:
                        f.write(content)
                    safe_move(tmp_unmerged_path, full_unmerged_name,
                              permissions=cluster_items['files'][data['cluster_item_key']]['permissions'],
                              ownership=(common.wazuh_uid(), common.wazuh_gid())
                              )
            else:
                # Create destination dir if it doesn't exist.
                if not os.path.exists(os.path.dirname(full_filename_path)):
                    utils.mkdir_with_mode(os.path.dirname(full_filename_path))
                # Move the file from zipdir (directory containing unzipped files) to <wazuh_path>/filename.
                safe_move(os.path.join(zip_path, filename), full_filename_path,
                          permissions=cluster_items['files'][data['cluster_item_key']]['permissions'],
                          ownership=(common.wazuh_uid(), common.wazuh_gid())
                          )
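Example 2 relies on cluster.unmerge_info to split a merged file back into (name, content, mtime) tuples. The on-disk framing of merged files is not shown in these examples; the generator below is a sketch under the assumption of a hypothetical header line "!<size> <name> <mtime>" followed by exactly <size> bytes of content, which is enough to see why the caller can stream entries one at a time.

from typing import Iterator, Tuple


def unmerge_sketch(merged_path: str) -> Iterator[Tuple[str, bytes, str]]:
    # Hypothetical framing: each entry is "!<size> <name> <mtime>\n" followed
    # by exactly <size> bytes of content. The real format may differ.
    with open(merged_path, 'rb') as f:
        while True:
            header = f.readline()
            if not header:
                break
            size, name, mtime = header.decode().lstrip('!').split(' ', 2)
            yield name, f.read(int(size)), mtime.rstrip('\n')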
Example 3
async def decompress_files(zip_path, ko_files_name="files_metadata.json"):
    """Unzip files in a directory and load the files_metadata.json as a dict.

    Parameters
    ----------
    zip_path : str
        Full path to the zip file.
    ko_files_name : str
        Name of the metadata json inside zip file.

    Returns
    -------
    ko_files : dict
        Paths (keys) and metadata (values) of the files listed in cluster.json.
    zip_dir : str
        Full path to unzipped directory.
    """
    try:
        ko_files = ""  # Remains an empty string if the metadata file is missing from the zip.
        # Create a directory like {wazuh_path}/{cluster_path}/123456-123456.zipdir/
        zip_dir = zip_path + 'dir'
        mkdir_with_mode(zip_dir)
        with zipfile.ZipFile(zip_path) as zipf:
            zipf.extractall(path=zip_dir)

        if path.exists(path.join(zip_dir, ko_files_name)):
            with open(path.join(zip_dir, ko_files_name)) as ko:
                ko_files = json.loads(ko.read())
    except Exception as e:
        if path.exists(zip_dir):
            shutil.rmtree(zip_dir)
        raise e
    finally:
        # Once all files have been read, remove the zip file.
        remove(zip_path)
    return ko_files, zip_dir
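A hedged usage sketch for decompress_files follows. Since ko_files is initialised to an empty string, it stays "" (not {}) when files_metadata.json is absent from the zip, so callers should check it before iterating. The handler name and the zip path below are made up for illustration.

import asyncio
import shutil


async def handle_received_zip(zip_path):
    # decompress_files removes the zip itself; the caller owns the lifetime
    # of the unzipped directory it returns.
    ko_files, zip_dir = await decompress_files(zip_path)
    try:
        if not ko_files:  # "" when the metadata file was missing.
            return
        for rel_path, metadata in ko_files.items():
            print(rel_path, metadata)
    finally:
        shutil.rmtree(zip_dir)


asyncio.run(handle_received_zip('/var/ossec/queue/cluster/123456-123456.zip'))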
Example 4
    async def process_files_from_worker(self, files_checksums: Dict, decompressed_files_path: str, logger):
        """
        Iterate over the files received from the worker and update the local ones
        :param files_checksums: A dictionary containing file metadata
        :param decompressed_files_path: Filepath of the decompressed received zipfile
        :param logger: The logger to use
        :return: None
        """
        async def update_file(name: str, data: Dict):
            """
            Updates a file from the worker. It checks the modification date to decide whether to update it or not.
            If it's a merged file, it unmerges it.
            :param name: Filename to update
            :param data: File metadata
            :return: None
            """
            # Full path
            full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

            # Cluster items information: write mode and permissions
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                if os.path.basename(name) == 'client.keys':
                    self.logger.warning("Client.keys received in a master node")
                    raise exception.WazuhClusterError(3007)
                if data['merged']:
                    self.sync_extra_valid_status['total_extra_valid'] = len(agent_ids)
                    for file_path, file_data, file_time in wazuh.core.cluster.cluster.unmerge_info(data['merge_type'],
                                                                                                   decompressed_files_path,
                                                                                                   data['merge_name']):
                        full_unmerged_name = os.path.join(common.ossec_path, file_path)
                        tmp_unmerged_path = os.path.join(common.ossec_path, 'queue/cluster', self.name, os.path.basename(file_path))
                        try:
                            agent_id = os.path.basename(file_path)
                            if agent_id not in agent_ids:
                                n_errors['warnings'][data['cluster_item_key']] = 1 \
                                    if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                    else n_errors['warnings'][data['cluster_item_key']] + 1

                                self.logger.debug2("Received group of an non-existent agent '{}'".format(agent_id))
                                continue

                            try:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

                            if os.path.isfile(full_unmerged_name):

                                local_mtime = datetime.utcfromtimestamp(int(os.stat(full_unmerged_name).st_mtime))
                                # Skip the received file if the local copy is newer.
                                if local_mtime > mtime:
                                    logger.debug2("Receiving an old file ({})".format(file_path))
                                    continue

                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            utils.safe_move(tmp_unmerged_path, full_unmerged_name,
                                            ownership=(common.ossec_uid(), common.ossec_gid()),
                                            permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                                            time=(mtime_epoch, mtime_epoch)
                                            )
                        except Exception as e:
                            self.logger.error("Error updating agent group/status ({}): {}".format(tmp_unmerged_path, e))
                            self.sync_extra_valid_status['total_extra_valid'] -= 1

                            n_errors['errors'][data['cluster_item_key']] = 1 \
                                if n_errors['errors'].get(data['cluster_item_key']) is None \
                                else n_errors['errors'][data['cluster_item_key']] + 1
                        await asyncio.sleep(0.0001)

                else:
                    zip_path = "{}{}".format(decompressed_files_path, name)
                    utils.safe_move(zip_path, full_path,
                                    ownership=(common.ossec_uid(), common.ossec_gid()),
                                    permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions']
                                    )

            except exception.WazuhException as e:
                logger.debug2("Warning updating file '{}': {}".format(name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("Error updating file '{}': {}".format(name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(
                    data['cluster_item_key']) \
                    else n_errors[error_tag][data['cluster_item_key']] + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

        # tmp path
        tmp_path = "/queue/cluster/{}/tmp_files".format(self.name)
        n_merged_files = 0
        n_errors = {'errors': {}, 'warnings': {}}

        # create temporary directory for lock files
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            utils.mkdir_with_mode(lock_directory)

        try:
            agents = Agent.get_agents_overview(select=['name'], limit=None)['items']
            agent_ids = set(map(operator.itemgetter('id'), agents))
        except Exception as e:
            logger.debug2("Error getting agent ids: {}".format(e))
            agent_ids = {}

        try:
            for filename, data in files_checksums.items():
                await update_file(data=data, name=filename)
        except Exception as e:
            self.logger.error("Error updating worker files: '{}'.".format(e))
            raise e

        if sum(n_errors['errors'].values()) > 0:
            logger.error("Errors updating worker files: {}".format(' | '.join(
                ['{}: {}'.format(key, value) for key, value
                 in n_errors['errors'].items()])
            ))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == '/queue/agent-groups/':
                    logger.debug2("Received {} group assignments for non-existent agents. Skipping.".format(value))
Example 5
    async def process_files_from_worker(self, files_checksums: Dict,
                                        decompressed_files_path: str, logger):
        """Iterate over received files from worker and updates the local ones.

        Parameters
        ----------
        files_checksums : dict
            Dictionary containing file metadata (each key is a filepath and each value its metadata).
        decompressed_files_path : str
            Filepath of the decompressed received zipfile.
        logger : Logger object
            The logger to use.
        """
        async def update_file(name: str, data: Dict):
            """Update a local file with one received from a worker.

            The modification date is checked to decide whether to update it or not.

            Parameters
            ----------
            name : str
                Relative path of the file.
            data : dict
                Metadata of the file (MD5, merged, etc).
            """
            # Full path
            full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

            # Cluster items information: write mode and permissions.
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(
                common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                # The only valid client.keys file is the local one (master's).
                if os.path.basename(name) == 'client.keys':
                    self.logger.warning(
                        "Client.keys received in a master node")
                    raise exception.WazuhClusterError(3007)

                # If the file is merged, create individual files from it.
                if data['merged']:
                    self.sync_extra_valid_status['total_extra_valid'] = len(
                        agent_ids)
                    for file_path, file_data, file_time in wazuh.core.cluster.cluster.unmerge_info(
                            data['merge_type'], decompressed_files_path,
                            data['merge_name']):
                        # Destination path.
                        full_unmerged_name = os.path.join(
                            common.ossec_path, file_path)
                        # Path where the file is created before being moved to its destination (with safe_move).
                        tmp_unmerged_path = os.path.join(
                            common.ossec_path, 'queue/cluster', self.name,
                            os.path.basename(file_path))

                        try:
                            agent_id = os.path.basename(file_path)
                            # If the agent does not exist on the master, do not copy its file from the worker.
                            if agent_id not in agent_ids:
                                n_errors['warnings'][data['cluster_item_key']] = 1 \
                                    if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                    else n_errors['warnings'][data['cluster_item_key']] + 1

                                self.logger.debug2(
                                    "Received group of an non-existent agent '{}'"
                                    .format(agent_id))
                                continue

                            # Parse the modification time specified inside the merged file.
                            try:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(
                                    file_time, '%Y-%m-%d %H:%M:%S')

                            # If the file already existed, check if it is older than the one to be copied from worker.
                            if os.path.isfile(full_unmerged_name):
                                local_mtime = datetime.utcfromtimestamp(
                                    int(os.stat(full_unmerged_name).st_mtime))
                                if local_mtime > mtime:
                                    logger.debug2(
                                        "Receiving an old file ({})".format(
                                            file_path))
                                    continue

                            # Create the file in a temporary path and safe_move it to the destination path.
                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            utils.safe_move(
                                tmp_unmerged_path,
                                full_unmerged_name,
                                ownership=(common.ossec_uid(),
                                           common.ossec_gid()),
                                permissions=self.cluster_items['files'][
                                    data['cluster_item_key']]['permissions'],
                                time=(mtime_epoch, mtime_epoch))
                        except Exception as e:
                            self.logger.error(
                                "Error updating agent group/status ({}): {}".
                                format(tmp_unmerged_path, e))
                            self.sync_extra_valid_status[
                                'total_extra_valid'] -= 1

                            n_errors['errors'][data['cluster_item_key']] = 1 \
                                if n_errors['errors'].get(data['cluster_item_key']) is None \
                                else n_errors['errors'][data['cluster_item_key']] + 1
                        await asyncio.sleep(0.0001)

                # If the file is not merged, move it directly to the destination path.
                else:
                    zip_path = "{}{}".format(decompressed_files_path, name)
                    utils.safe_move(zip_path,
                                    full_path,
                                    ownership=(common.ossec_uid(),
                                               common.ossec_gid()),
                                    permissions=self.cluster_items['files']
                                    [data['cluster_item_key']]['permissions'])

            except exception.WazuhException as e:
                logger.debug2("Warning updating file '{}': {}".format(name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("Error updating file '{}': {}".format(name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(
                    data['cluster_item_key']) \
                    else n_errors[error_tag][data['cluster_item_key']] + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

        n_merged_files = 0
        n_errors = {'errors': {}, 'warnings': {}}

        # Create temporary directory for lock files.
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            utils.mkdir_with_mode(lock_directory)

        # Get ID of all agents.
        try:
            agents = Agent.get_agents_overview(select=['name'],
                                               limit=None)['items']
            agent_ids = set(map(operator.itemgetter('id'), agents))
        except Exception as e:
            logger.debug2("Error getting agent ids: {}".format(e))
            agent_ids = {}

        # Iterate over and update each file specified in 'files_checksums' if the conditions are met.
        try:
            for filename, data in files_checksums.items():
                await update_file(data=data, name=filename)
        except Exception as e:
            self.logger.error("Error updating worker files: '{}'.".format(e))
            raise e

        # Log errors if any.
        if sum(n_errors['errors'].values()) > 0:
            logger.error("Errors updating worker files: {}".format(' | '.join([
                '{}: {}'.format(key, value)
                for key, value in n_errors['errors'].items()
            ])))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == '/queue/agent-groups/':
                    logger.debug2(
                        "Received {} group assignments for non-existent agents. Skipping."
                        .format(value))
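The repeated conditional bookkeeping on n_errors in both versions can be expressed with collections.defaultdict; a possible simplification, not part of the original code:

from collections import defaultdict

n_errors = {'errors': defaultdict(int), 'warnings': defaultdict(int)}

# Each increment site inside update_file then collapses to a single line, e.g.:
n_errors['warnings']['/queue/agent-groups/'] += 1

assert sum(n_errors['warnings'].values()) == 1

The final reporting loops work unchanged, since defaultdict is a dict subclass and sum() and .items() behave the same.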