Example 1
def add_worker(self, data, ip, handler):
    worker_id = Server.add_worker(self, data, ip, handler)
    # create a directory in queue/cluster to store all of this node's files
    node_path = "{}/queue/cluster/{}".format(common.ossec_path, worker_id)
    if not os.path.exists(node_path):
        mkdir_with_mode(node_path)
    return worker_id
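Every example in this listing leans on mkdir_with_mode, a Wazuh utility that creates a directory tree with a fixed mode rather than one filtered through the process umask. As a rough sketch of what such a helper could look like (the signature and the 0o770 default are assumptions, not the project's actual code):

import os

def mkdir_with_mode(name, mode=0o770):
    # Sketch only: recursively create `name`, chmod-ing every directory we
    # create, because the mode passed to os.mkdir is masked by the umask.
    head, tail = os.path.split(name)
    if head and not os.path.isdir(head):
        mkdir_with_mode(head, mode)  # create missing parents first
    if tail and not os.path.isdir(name):
        os.mkdir(name)
        os.chmod(name, mode)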
Example 2
    def hello(self, data: bytes) -> Tuple[bytes, bytes]:
        """
        Processes "hello" command sent by a worker right after it connects to the server. It also initializes
        the task loggers.

        :param data: Node name, cluster name, node type and wazuh version all separated by spaces.
        :return: response command and payload.
        """
        name, cluster_name, node_type, version = data.split(b' ')
        cmd, payload = super().hello(name)

        self.task_loggers = {'Integrity': self.setup_task_logger('Integrity'),
                             'Extra valid': self.setup_task_logger('Extra valid'),
                             'Agent info': self.setup_task_logger('Agent info')}

        self.version, self.cluster_name, self.node_type = version.decode(), cluster_name.decode(), node_type.decode()

        if self.cluster_name != self.server.configuration['name']:
            cmd, payload = b'err', b'Worker does not belong to the same cluster'
        elif self.version != metadata.__version__:
            cmd, payload = b'err', b'Worker and master versions are not the same'

        worker_dir = '{}/queue/cluster/{}'.format(common.ossec_path, self.name)
        if cmd == b'ok' and not os.path.exists(worker_dir):
            utils.mkdir_with_mode(worker_dir)
        return cmd, payload
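The handshake payload is a single space-separated bytes string, which means none of the four fields may itself contain a space. A small illustration with made-up values:

# Hypothetical worker payload; every value here is a placeholder.
data = b'worker-01 mycluster worker 3.9.0'
name, cluster_name, node_type, version = data.split(b' ')
assert version.decode() == '3.9.0'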
Example 3
def compress_files(name, list_path, cluster_control_json=None):
    zip_file_path = "{0}/queue/cluster/{1}/{1}-{2}-{3}.zip".format(
        common.ossec_path, name, time(),
        str(random())[2:])
    if not os.path.exists(os.path.dirname(zip_file_path)):
        mkdir_with_mode(os.path.dirname(zip_file_path))
    with zipfile.ZipFile(zip_file_path, 'x') as zf:
        # write files
        if list_path:
            for f in list_path:
                try:
                    zf.write(filename=common.ossec_path + f, arcname=f)
                except zipfile.LargeZipFile as e:
                    raise WazuhException(3001, str(e))
                except Exception as e:
                    logger.error("[Cluster] {}".format(
                        str(WazuhException(3001, str(e)))))

        try:
            zf.writestr("cluster_control.json",
                        json.dumps(cluster_control_json))
        except Exception as e:
            raise WazuhException(3001, str(e))

    return zip_file_path
Example 4
        def overwrite_or_create_files(filename, data):
            full_filename_path = common.ossec_path + filename
            if os.path.basename(filename) == 'client.keys':
                self._check_removed_agents("{}{}".format(zip_path, filename),
                                           logger)

            if data['merged']:  # worker nodes can only receive agent-groups files
                if data['merge-type'] == 'agent-info':
                    logger.warning("Agent status received in a worker node")
                    raise WazuhException(3011)

                for name, content, _ in cluster.unmerge_agent_info(
                        'agent-groups', zip_path, filename):
                    full_unmerged_name = common.ossec_path + name
                    tmp_unmerged_path = full_unmerged_name + '.tmp'
                    with open(tmp_unmerged_path, 'wb') as f:
                        f.write(content)
                    os.chown(tmp_unmerged_path, common.ossec_uid,
                             common.ossec_gid)
                    os.rename(tmp_unmerged_path, full_unmerged_name)
            else:
                if not os.path.exists(os.path.dirname(full_filename_path)):
                    utils.mkdir_with_mode(os.path.dirname(full_filename_path))
                os.rename("{}{}".format(zip_path, filename),
                          full_filename_path)
                os.chown(full_filename_path, common.ossec_uid,
                         common.ossec_gid)
                os.chmod(
                    full_filename_path, self.cluster_items['files'][
                        data['cluster_item_key']]['permissions'])
Example 5
        def overwrite_or_create_files(filename: str, data: Dict):
            """
            Updates a file coming from the master
            :param filename: Filename to update
            :param data: File metadata such as modification time, whether it's a merged file or not, etc.
            :return: None
            """
            full_filename_path = common.ossec_path + filename
            if os.path.basename(filename) == 'client.keys':
                self._check_removed_agents("{}{}".format(zip_path, filename), logger)

            if data['merged']:  # worker nodes can only receive agent-groups files
                if data['merge-type'] == 'agent-info':
                    logger.warning("Agent status received in a worker node")
                    raise WazuhException(3011)

                for name, content, _ in cluster.unmerge_agent_info('agent-groups', zip_path, filename):
                    full_unmerged_name = os.path.join(common.ossec_path, name)
                    tmp_unmerged_path = full_unmerged_name + '.tmp'
                    with open(tmp_unmerged_path, 'wb') as f:
                        f.write(content)
                    safe_move(tmp_unmerged_path, full_unmerged_name,
                              permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                              ownership=(common.ossec_uid(), common.ossec_gid())
                              )
            else:
                if not os.path.exists(os.path.dirname(full_filename_path)):
                    utils.mkdir_with_mode(os.path.dirname(full_filename_path))
                safe_move("{}{}".format(zip_path, filename), full_filename_path,
                          permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                          ownership=(common.ossec_uid(), common.ossec_gid())
                          )
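Compared with Example 4, the rename/chown/chmod sequence has been folded into safe_move. A minimal sketch of such a helper, assuming the keyword arguments visible at the call sites (an ownership tuple, numeric permissions and, in Example 12, an (atime, mtime) pair); the real Wazuh implementation may well differ, for instance by copying when source and target live on different filesystems:

import os

def safe_move(source, target, ownership=None, permissions=None, time=None):
    # Sketch: stage the file next to the target, then rename it into place;
    # the final rename is atomic within a single filesystem.
    tmp_target = target + '.tmp'
    os.rename(source, tmp_target)
    if ownership is not None:
        os.chown(tmp_target, *ownership)
    if permissions is not None:
        os.chmod(tmp_target, permissions)
    if time is not None:
        os.utime(tmp_target, time)  # (atime, mtime)
    os.rename(tmp_target, target)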
Example 6
def connection_result(self, future_result):
    super().connection_result(future_result)
    if self.connected:
        # create directory for temporary files
        worker_tmp_files = '{}/queue/cluster/{}'.format(
            common.ossec_path, self.name)
        if not os.path.exists(worker_tmp_files):
            utils.mkdir_with_mode(worker_tmp_files)
Example 7
def connection_result(self, future_result):
    """
    Callback function called when the master sends a response to the hello command sent by the worker.

    :param future_result: Result of the hello request
    """
    super().connection_result(future_result)
    if self.connected:
        # create directory for temporary files
        worker_tmp_files = '{}/queue/cluster/{}'.format(common.ossec_path, self.name)
        if not os.path.exists(worker_tmp_files):
            utils.mkdir_with_mode(worker_tmp_files)
Example 8
def decompress_files(zip_path, ko_files_name="cluster_control.json"):
    ko_files = ""
    zip_dir = zip_path + 'dir'
    mkdir_with_mode(zip_dir)
    with zipfile.ZipFile(zip_path) as zipf:
        zipf.extractall(path=zip_dir)

    if os.path.exists("{}/{}".format(zip_dir, ko_files_name)):
        with open("{}/{}".format(zip_dir, ko_files_name)) as ko:
            ko_files = json.loads(ko.read())

    # once all files have been read, remove the zip file
    remove(zip_path)
    return ko_files, zip_dir
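Examples 3 and 8 form a round trip: the sender zips the listed files plus a cluster_control.json manifest, and the receiver extracts everything and deletes the archive. A hedged usage sketch, with placeholder arguments:

# Illustrative only: the file list and manifest contents are made up.
zip_path = compress_files('master', ['/etc/client.keys'],
                          cluster_control_json={'example': 'metadata'})
ko_files, zip_dir = decompress_files(zip_path)
# ko_files holds the parsed manifest and zip_dir the extracted files;
# the zip itself has already been removed by decompress_files.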
Example 9
    def computeArchivesDirectory(self, rotated_filepath):
        """
        Based on the name of the rotated file, compute in which directory it should be stored.

        :param rotated_filepath: Filepath of the rotated log
        :return: New directory path
        """
        rotated_file = path.basename(rotated_filepath)
        year, month, day = re.match(r'[\w\.]+\.(\d+)-(\d+)-(\d+)', rotated_file).groups()
        month = month_abbr[int(month)]

        log_path = '{}/logs/cluster/{}/{}'.format(common.ossec_path, year, month)
        if not path.exists(log_path):
            mkdir_with_mode(log_path, 0o750)

        return '{}/cluster-{}.log.gz'.format(log_path, day)
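For instance, assuming common.ossec_path is '/var/ossec', a rotated file named cluster.log.2019-07-23 would be mapped as follows:

# computeArchivesDirectory('/var/ossec/logs/cluster.log.2019-07-23')
#   -> '/var/ossec/logs/cluster/2019/Jul/cluster-23.log.gz'
# (logs/cluster/2019/Jul is created with mode 0o750 if it does not exist)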
Example 10
    def hello(self, data: bytes) -> Tuple[bytes, bytes]:
        name, cluster_name, node_type, version = data.split(b' ')
        cmd, payload = super().hello(name)

        self.task_loggers = {'Integrity': self.setup_task_logger('Integrity'),
                             'Extra valid': self.setup_task_logger('Extra valid'),
                             'Agent info': self.setup_task_logger('Agent info')}

        self.version, self.cluster_name, self.node_type = version.decode(), cluster_name.decode(), node_type.decode()

        if self.cluster_name != self.server.configuration['name']:
            cmd, payload = b'err', b'Worker does not belong to the same cluster'
        elif self.version != metadata.__version__:
            cmd, payload = b'err', b'Worker and master versions are not the same'

        worker_dir = '{}/queue/cluster/{}'.format(common.ossec_path, self.name)
        if cmd == b'ok' and not os.path.exists(worker_dir):
            utils.mkdir_with_mode(worker_dir)
        return cmd, payload
Example 11
def decompress_files(zip_path, ko_files_name="cluster_control.json"):
    ko_files = ""
    zip_dir = zip_path + 'dir'
    mkdir_with_mode(zip_dir)
    with zipfile.ZipFile(zip_path) as zipf:
        for name in zipf.namelist():
            if name == ko_files_name:
                ko_files = json.loads(zipf.open(name).read())
            else:
                filename = "{}/{}".format(zip_dir, path.dirname(name))
                if not path.exists(filename):
                    mkdir_with_mode(filename)
                with open("{}/{}".format(filename, path.basename(name)),
                          'wb') as f:
                    content = zipf.open(name).read()
                    f.write(content)

    # once all files have been read, remove the zip file
    remove(zip_path)
    return ko_files, zip_dir
Example 12
    async def process_files_from_worker(self, files_checksums: Dict, decompressed_files_path: str, logger):
        """
        Iterates over the files received from the worker and updates the local ones.

        :param files_checksums: A dictionary containing file metadata
        :param decompressed_files_path: Filepath of the decompressed received zipfile
        :param logger: The logger to use
        :return: None
        """
        async def update_file(name: str, data: Dict):
            """
            Updates a file from the worker. It checks the modification date to decide whether to update it or not.
            If it's a merged file, it unmerges it.
            :param name: Filename to update
            :param data: File metadata
            :return: None
            """
            # Full path
            full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

            # Lock the target file to avoid simultaneous writes
            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                if os.path.basename(name) == 'client.keys':
                    self.logger.warning("Client.keys received in a master node")
                    raise WazuhException(3007)
                if data['merged']:
                    is_agent_info = data['merge_type'] == 'agent-info'
                    if is_agent_info:
                        self.sync_agent_info_status['total_agent_info'] = len(agent_ids)
                    else:
                        self.sync_extra_valid_status['total_extra_valid'] = len(agent_ids)
                    for file_path, file_data, file_time in cluster.unmerge_agent_info(data['merge_type'],
                                                                                      decompressed_files_path,
                                                                                      data['merge_name']):
                        full_unmerged_name = os.path.join(common.ossec_path, file_path)
                        tmp_unmerged_path = os.path.join(common.ossec_path, 'queue/cluster', self.name, os.path.basename(file_path))
                        try:
                            if is_agent_info:
                                agent_name_re = re.match(r'(^.+)-(.+)$', os.path.basename(file_path))
                                agent_name = agent_name_re.group(1) if agent_name_re else os.path.basename(file_path)
                                if agent_name not in agent_names:
                                    n_errors['warnings'][data['cluster_item_key']] = 1 \
                                        if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                        else n_errors['warnings'][data['cluster_item_key']] + 1

                                    self.logger.debug2("Received status of an non-existent agent '{}'".format(agent_name))
                                    continue
                            else:
                                agent_id = os.path.basename(file_path)
                                if agent_id not in agent_ids:
                                    n_errors['warnings'][data['cluster_item_key']] = 1 \
                                        if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                        else n_errors['warnings'][data['cluster_item_key']] + 1

                                    self.logger.debug2("Received group of an non-existent agent '{}'".format(agent_id))
                                    continue

                            try:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
                            except ValueError:
                                mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

                            if os.path.isfile(full_unmerged_name):

                                local_mtime = datetime.utcfromtimestamp(int(os.stat(full_unmerged_name).st_mtime))
                                # skip the file if the local copy is newer than the received one
                                if local_mtime > mtime:
                                    logger.debug2("Receiving an old file ({})".format(file_path))
                                    continue

                            with open(tmp_unmerged_path, 'wb') as f:
                                f.write(file_data)

                            mtime_epoch = timegm(mtime.timetuple())
                            utils.safe_move(tmp_unmerged_path, full_unmerged_name,
                                            ownership=(common.ossec_uid(), common.ossec_gid()),
                                            permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                                            time=(mtime_epoch, mtime_epoch)
                                            )
                        except Exception as e:
                            self.logger.error("Error updating agent group/status ({}): {}".format(tmp_unmerged_path, e))
                            if is_agent_info:
                                self.sync_agent_info_status['total_agent_info'] -= 1
                            else:
                                self.sync_extra_valid_status['total_extra_valid'] -= 1

                            n_errors['errors'][data['cluster_item_key']] = 1 \
                                if n_errors['errors'].get(data['cluster_item_key']) is None \
                                else n_errors['errors'][data['cluster_item_key']] + 1
                        await asyncio.sleep(0.0001)

                else:
                    zip_path = "{}{}".format(decompressed_files_path, name)
                    utils.safe_move(zip_path, full_path,
                                    ownership=(common.ossec_uid(), common.ossec_gid()),
                                    permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions']
                                    )

            except WazuhException as e:
                logger.debug2("Warning updating file '{}': {}".format(name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("Error updating file '{}': {}".format(name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(
                    data['cluster_item_key']) \
                    else n_errors[error_tag][data['cluster_item_key']] + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

        # tmp path
        tmp_path = "/queue/cluster/{}/tmp_files".format(self.name)
        n_merged_files = 0
        n_errors = {'errors': {}, 'warnings': {}}

        # create temporary directory for lock files
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            utils.mkdir_with_mode(lock_directory)

        try:
            agents = Agent.get_agents_overview(select={'fields': ['name']}, limit=None)['items']
            agent_names = set(map(operator.itemgetter('name'), agents))
            agent_ids = set(map(operator.itemgetter('id'), agents))
        except Exception as e:
            logger.debug2("Error getting agent ids and names: {}".format(e))
            agent_names, agent_ids = {}, {}

        try:
            for filename, data in files_checksums.items():
                await update_file(data=data, name=filename)

            shutil.rmtree(decompressed_files_path)

        except Exception as e:
            self.logger.error("Error updating worker files: '{}'.".format(e))
            raise e

        if sum(n_errors['errors'].values()) > 0:
            logger.error("Errors updating worker files: {}".format(' | '.join(
                ['{}: {}'.format(key, value) for key, value
                 in n_errors['errors'].items()])
            ))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == '/queue/agent-info/':
                    logger.debug2("Received {} agent statuses for non-existent agents. Skipping.".format(value))
                elif key == '/queue/agent-groups/':
                    logger.debug2("Received {} group assignments for non-existent agents. Skipping.".format(value))
Example 13
    def _update_worker_files_in_master(self, json_file, zip_dir_path,
                                       worker_name, cluster_control_key,
                                       cluster_control_subkey, tag):
        def update_file(n_errors,
                        name,
                        data,
                        file_time=None,
                        content=None,
                        agents=None):
            # Full path
            full_path = common.ossec_path + name
            error_updating_file = False

            # Cluster items information: write mode and umask
            w_mode = cluster_items[data['cluster_item_key']]['write_mode']
            umask = cluster_items[data['cluster_item_key']]['umask']

            if content is None:
                zip_path = "{}/{}".format(zip_dir_path, name)
                with open(zip_path, 'rb') as f:
                    content = f.read()

            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(
                common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                _update_file(file_path=name,
                             new_content=content,
                             umask_int=umask,
                             mtime=file_time,
                             w_mode=w_mode,
                             tmp_dir=tmp_path,
                             whoami='master',
                             agents=agents)

            except WazuhException as e:
                logger.debug2("{}: Warning updating file '{}': {}".format(
                    tag, name, e))
                error_tag = 'warnings'
                error_updating_file = True
            except Exception as e:
                logger.debug2("{}: Error updating file '{}': {}".format(
                    tag, name, e))
                error_tag = 'errors'
                error_updating_file = True

            if error_updating_file:
                n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(data['cluster_item_key']) \
                                                                  else n_errors[error_tag][data['cluster_item_key']] + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

            return n_errors, error_updating_file

        # tmp path
        tmp_path = "/queue/cluster/{}/tmp_files".format(worker_name)
        cluster_items = get_cluster_items()['files']
        n_merged_files = 0
        n_errors = {'errors': {}, 'warnings': {}}

        # create temporary directory for lock files
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            mkdir_with_mode(lock_directory)

        try:
            agents = Agent.get_agents_overview(select={'fields': ['name']},
                                               limit=None)['items']
            agent_names = set(map(itemgetter('name'), agents))
            agent_ids = set(map(itemgetter('id'), agents))
        except Exception as e:
            logger.debug2("{}: Error getting agent ids and names: {}".format(
                tag, e))
            agent_names, agent_ids = {}, {}

        before = time.time()
        try:
            for filename, data in json_file.items():
                if data['merged']:
                    for file_path, file_data, file_time in unmerge_agent_info(
                            data['merge_type'], zip_dir_path,
                            data['merge_name']):
                        n_errors, error_updating_file = update_file(
                            n_errors, file_path, data, file_time, file_data,
                            (agent_names, agent_ids))
                        if not error_updating_file:
                            n_merged_files += 1

                        if self.stopper.is_set():
                            break
                else:
                    n_errors, _ = update_file(n_errors, filename, data)

        except Exception as e:
            logger.error("{}: Error updating worker files: '{}'.".format(
                tag, e))
            raise e

        after = time.time()
        logger.debug(
            "{0}: Time updating worker files: {1:.2f}s. Total of updated worker files: {2}."
            .format(tag, after - before, n_merged_files))

        if sum(n_errors['errors'].values()) > 0:
            logger.error("{}: Errors updating worker files: {}".format(
                tag, ' | '.join([
                    '{}: {}'.format(key, value)
                    for key, value in n_errors['errors'].items()
                ])))
        if sum(n_errors['warnings'].values()) > 0:
            for key, value in n_errors['warnings'].items():
                if key == '/queue/agent-info/':
                    logger.debug2(
                        "Received {} agent statuses for non-existent agents. Skipping."
                        .format(value))
                elif key == '/queue/agent-groups/':
                    logger.debug2(
                        "Received {} group assignments for non-existent agents. Skipping."
                        .format(value))

        # Save info for healthcheck
        self.manager.set_worker_status(worker_id=self.name,
                                       key=cluster_control_key,
                                       subkey=cluster_control_subkey,
                                       status=n_merged_files)
Example 14
def handle_connect(self):
    WorkerHandler.handle_connect(self)
    dir_path = "{}/queue/cluster/{}".format(common.ossec_path, self.name)
    if not os.path.exists(dir_path):
        mkdir_with_mode(dir_path)
Example 15
def _update_file(file_path,
                 new_content,
                 umask_int=None,
                 mtime=None,
                 w_mode=None,
                 tmp_dir='/queue/cluster',
                 whoami='master',
                 agents=None):

    dst_path = common.ossec_path + file_path
    if path.basename(dst_path) == 'client.keys':
        if whoami == 'worker':
            _check_removed_agents(new_content.split('\n'))
        else:
            logger.warning(
                "[Cluster] Client.keys file received in a master node.")
            raise WazuhException(3007)

    is_agent_info = 'agent-info' in dst_path
    is_agent_group = 'agent-groups' in dst_path
    if is_agent_info or is_agent_group:
        if whoami == 'master':
            agent_names, agent_ids = agents

            if is_agent_info:
                agent_name_re = re.match(r'(^.+)-(.+)$',
                                         path.basename(file_path))
                agent_name = agent_name_re.group(
                    1) if agent_name_re else path.basename(file_path)
                if agent_name not in agent_names:
                    raise WazuhException(3010, agent_name)
            elif is_agent_group:
                agent_id = path.basename(file_path)
                if agent_id not in agent_ids:
                    raise WazuhException(3010, agent_id)

            try:
                mtime = datetime.strptime(mtime, '%Y-%m-%d %H:%M:%S.%f')
            except ValueError:
                mtime = datetime.strptime(mtime, '%Y-%m-%d %H:%M:%S')

            if path.isfile(dst_path):

                local_mtime = datetime.utcfromtimestamp(
                    int(stat(dst_path).st_mtime))
                # skip the file if the local copy is newer than the received one
                if local_mtime > mtime:
                    logger.debug2(
                        "[Cluster] Receiving an old file ({})".format(
                            dst_path))  # debug2
                    return
        elif is_agent_info:
            logger.warning("[Cluster] Agent-info received in a worker node.")
            raise WazuhException(3011)

    # Write
    if w_mode == "atomic":
        f_temp = "{}{}{}.cluster.tmp".format(common.ossec_path, tmp_dir,
                                             file_path)
    else:
        f_temp = '{0}'.format(dst_path)

    if umask_int:
        oldumask = umask(umask_int)

    try:
        dest_file = open(f_temp, "w")
    except IOError as e:
        if e.errno == errno.ENOENT:
            dirpath = path.dirname(f_temp)
            mkdir_with_mode(dirpath)
            chmod(dirpath, S_IRWXU | S_IRWXG)
            dest_file = open(f_temp, "w")
        else:
            raise e

    dest_file.write(new_content)

    if umask_int:
        umask(oldumask)

    dest_file.close()

    if mtime:
        mtime_epoch = timegm(mtime.timetuple())
        utime(f_temp, (mtime_epoch, mtime_epoch))  # (atime, mtime)

    # Atomic
    if w_mode == "atomic":
        dirpath = path.dirname(dst_path)
        if not path.exists(dirpath):
            mkdir_with_mode(dirpath)
            chmod(path.dirname(dst_path), S_IRWXU | S_IRWXG)
        chown(f_temp, common.ossec_uid, common.ossec_gid)
        rename(f_temp, dst_path)
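The "atomic" write mode above boils down to writing the complete content elsewhere first and only then renaming it over the destination, so a concurrent reader sees either the old file or the new one, never a truncated one. The essence as a standalone sketch (names are illustrative):

import os

def atomic_write(dst_path, content, tmp_path):
    # Write everything to a temporary file first...
    with open(tmp_path, 'w') as f:
        f.write(content)
    # ...then rename it into place; rename(2) is atomic within one filesystem.
    os.rename(tmp_path, dst_path)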
Example 16
    def _update_client_files_in_master(self, json_file, files_to_update_json,
                                       zip_dir_path, client_name,
                                       cluster_control_key,
                                       cluster_control_subkey, tag):
        def update_file(n_errors,
                        name,
                        data,
                        file_time=None,
                        content=None,
                        agents=None):
            # Full path
            full_path = common.ossec_path + name

            # Cluster items information: write mode and umask
            w_mode = cluster_items[data['cluster_item_key']]['write_mode']
            umask = int(cluster_items[data['cluster_item_key']]['umask'],
                        base=0)

            if content is None:
                zip_path = "{}/{}".format(zip_dir_path, name)
                with open(zip_path, 'rb') as f:
                    content = f.read()

            lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(
                common.ossec_path, os.path.basename(full_path))
            lock_file = open(lock_full_path, 'a+')
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX)
                _update_file(file_path=name,
                             new_content=content,
                             umask_int=umask,
                             mtime=file_time,
                             w_mode=w_mode,
                             tmp_dir=tmp_path,
                             whoami='master',
                             agents=agents)

            except Exception as e:
                logger.debug2("{}: Error updating file '{}': {}".format(
                    tag, name, e))
                n_errors[data['cluster_item_key']] = 1 if not n_errors.get(data['cluster_item_key']) \
                                                          else n_errors[data['cluster_item_key']] + 1

            fcntl.lockf(lock_file, fcntl.LOCK_UN)
            lock_file.close()

            return n_errors

        # tmp path
        tmp_path = "/queue/cluster/{}/tmp_files".format(client_name)
        cluster_items = get_cluster_items()['files']
        n_agentsinfo = 0
        n_agentgroups = 0
        n_errors = {}

        # create temporary directory for lock files
        lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
        if not os.path.exists(lock_directory):
            mkdir_with_mode(lock_directory)

        try:
            agents = Agent.get_agents_overview(select={'fields': ['name']},
                                               limit=None)['items']
            agent_names = set(map(itemgetter('name'), agents))
            agent_ids = set(map(itemgetter('id'), agents))
            agents = None
        except Exception as e:
            logger.debug2("{}: Error getting agent ids and names: {}".format(
                tag, e))
            agent_names, agent_ids = {}, {}

        before = time.time()
        try:
            for filename, data in json_file.items():
                if data['merged']:
                    for file_path, file_data, file_time in unmerge_agent_info(
                            data['merge_type'], zip_dir_path,
                            data['merge_name']):
                        n_errors = update_file(n_errors, file_path, data,
                                               file_time, file_data,
                                               (agent_names, agent_ids))
                        if data['merge_type'] == 'agent-info':
                            n_agentsinfo += 1
                        else:
                            n_agentgroups += 1

                        if self.stopper.is_set():
                            break
                else:
                    n_errors = update_file(n_errors, filename, data)

        except Exception as e:
            logger.error("{}: Error updating client files: '{}'.".format(
                tag, e))
            raise e

        after = time.time()
        logger.debug(
            "{0}: Time updating client files: {1:.2f}s. Agents-info updated total: {2}. Agent-groups updated total: {3}."
            .format(tag, after - before, n_agentsinfo, n_agentgroups))

        if sum(n_errors.values()) > 0:
            logger.error("{}: Errors updating client files: {}".format(
                tag, ' | '.join([
                    '{}: {}'.format(key, value)
                    for key, value in n_errors.items()
                ])))

        # Save info for healthcheck
        status_number = n_agentsinfo if cluster_control_key == 'last_sync_agentinfo' else n_agentgroups
        self.manager.set_client_status(client_id=self.name,
                                       key=cluster_control_key,
                                       subkey=cluster_control_subkey,
                                       status=status_number)