Example #1
    async def sync_integrity(self, task_name: str, received_file: asyncio.Event):
        logger = self.task_loggers['Integrity']

        self.sync_integrity_status['date_start_master'] = str(datetime.now())

        logger.info("Waiting to receive zip file from worker")
        await asyncio.wait_for(received_file.wait(),
                               timeout=self.cluster_items['intervals']['communication']['timeout_receiving_file'])
        received_filename = self.sync_tasks[task_name].filename
        if received_filename == 'Error':
            logger.info("Stopping synchronization process: worker files weren't correctly received.")
            return
        logger.debug("Received file from worker: '{}'".format(received_filename))

        files_checksums, decompressed_files_path = cluster.decompress_files(received_filename)
        logger.info("Analyzing worker integrity: Received {} files to check.".format(len(files_checksums)))

        # Classify files as shared, missing, extra, or extra valid.
        worker_files_ko, counts = cluster.compare_files(self.server.integrity_control, files_checksums, self.name)

        # Save file counts for the health check and remove the decompressed files.
        self.sync_integrity_status['total_files'] = counts
        shutil.rmtree(decompressed_files_path)

        if not functools.reduce(operator.add, map(len, worker_files_ko.values())):
            logger.info("Analyzing worker integrity: Files checked. There are no KO files.")
            result = await self.send_request(command=b'sync_m_c_ok', data=b'')
        else:
            logger.info("Analyzing worker integrity: Files checked. There are KO files.")

            # Compress data: master files (only KO shared and missing)
            logger.debug("Analyzing worker integrity: Files checked. Compressing KO files.")
            master_files_paths = worker_files_ko['shared'].keys() | worker_files_ko['missing'].keys()
            compressed_data = cluster.compress_files(self.name, master_files_paths, worker_files_ko)

            logger.info("Analyzing worker integrity: Files checked. KO files compressed.")
            task_name = await self.send_request(command=b'sync_m_c', data=b'')
            if task_name.startswith(b'Error'):
                logger.error(task_name.decode())
                return task_name

            result = await self.send_file(compressed_data)
            os.unlink(compressed_data)
            if result.startswith(b'Error'):
                self.logger.error("Error sending files information: {}".format(result.decode()))
                result = await self.send_request(command=b'sync_m_c_e', data=task_name + b' ' + b'Error')
            else:
                result = await self.send_request(command=b'sync_m_c_e',
                                                 data=task_name + b' ' + compressed_data.replace(common.ossec_path, '').encode())

            if result.startswith(b'Error'):
                logger.error(result.decode())

        self.sync_integrity_status['date_end_master'] = str(datetime.now())
        self.sync_integrity_free = True
        logger.info("Finished integrity synchronization.")
        return result
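
The emptiness test above relies on summing the lengths of every KO category with functools.reduce. A minimal, self-contained sketch of that check; the worker_files_ko layout is an assumption based on the category names used throughout these snippets:

    import functools
    import operator

    # Hypothetical classification result, mirroring the categories above.
    worker_files_ko = {'missing': {'etc/a.conf': {}}, 'shared': {}, 'extra': {}, 'extra_valid': {}}

    # Total KO files across all categories; zero means the worker is in sync.
    total_ko = functools.reduce(operator.add, map(len, worker_files_ko.values()))
    print(total_ko)  # 1 -> KO files exist, so the master compresses and sends its versions
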
Example #2
    async def sync_worker_files(self, task_name: str, received_file: asyncio.Event, logger):
        logger.info("Waiting to receive zip file from worker")
        await asyncio.wait_for(received_file.wait(),
                               timeout=self.cluster_items['intervals']['communication']['timeout_receiving_file'])
        received_filename = self.sync_tasks[task_name].filename
        if received_filename == 'Error':
            logger.info("Stopping synchronization process: worker files weren't correctly received.")
            return

        logger.debug("Received file from worker: '{}'".format(received_filename))

        files_checksums, decompressed_files_path = cluster.decompress_files(received_filename)
        logger.info("Analyzing worker files: Received {} files to check.".format(len(files_checksums)))
        self.process_files_from_worker(files_checksums, decompressed_files_path, logger)
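
Both coroutines above block on an asyncio.Event until the file-reception callback sets it, bounded by a configurable timeout. A minimal sketch of that pattern with illustrative names (not Wazuh's API); note that the snippets do not catch asyncio.TimeoutError, so it propagates to the caller:

    import asyncio

    async def receiver(received_file: asyncio.Event):
        # Block until the transfer callback sets the event, or fail after 10 s.
        await asyncio.wait_for(received_file.wait(), timeout=10)
        print("file received")

    async def main():
        event = asyncio.Event()
        asyncio.get_running_loop().call_later(1, event.set)  # simulate arrival after 1 s
        await receiver(event)

    asyncio.run(main())
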
Example #3
    def process_files_from_client(self, client_name, data_received, cluster_control_key, cluster_control_subkey, tag=None):
        sync_result = False

        # Save info for healthcheck
        self.manager.set_client_status(client_id=self.name, key=cluster_control_key,
                                       subkey="date_start_master",
                                       status=datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-4])
        self.manager.set_client_status(client_id=self.name, key=cluster_control_key,
                                       subkey="date_end_master", status="In progress")
        self.manager.set_client_status(client_id=self.name, key=cluster_control_key,
                                       subkey=cluster_control_subkey, status="In progress")
        # ---

        if not tag:
            tag = "[Master] [process_files_from_client]"

        # Extract received data
        logger.info("{0}: Analyzing received files: Start.".format(tag))

        try:
            json_file, zip_dir_path = decompress_files(data_received)
        except Exception as e:
            logger.error("{0}: Error decompressing data: {1}.".format(tag, str(e)))
            raise e

        if json_file:
            client_files_json = json_file['client_files']
        else:
            raise Exception("cluster_control.json not included in received zip file")

        logger.info("{0}: Analyzing received files: End.".format(tag))

        logger.info("{0}: Updating master files: Start.".format(tag))

        # Update files
        self._update_client_files_in_master(client_files_json, zip_dir_path, client_name,
                                            cluster_control_key, cluster_control_subkey,
                                            tag)

        # Remove tmp directory created when zip file was received
        shutil.rmtree(zip_dir_path)

        logger.info("{0}: Updating master files: End.".format(tag))

        sync_result = True

        # Save info for healthcheck
        self.manager.set_client_status(client_id=self.name, key=cluster_control_key,
                                       subkey="date_end_master",
                                       status=datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-4])

        return sync_result
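
The health-check timestamps above are built with strftime('%Y-%m-%d %H:%M:%S.%f')[:-4], which drops the last four characters of the six-digit microsecond field, leaving centisecond precision. A quick demonstration:

    from datetime import datetime

    now = datetime(2023, 5, 17, 12, 30, 45, 123456)
    print(now.strftime('%Y-%m-%d %H:%M:%S.%f'))       # 2023-05-17 12:30:45.123456
    print(now.strftime('%Y-%m-%d %H:%M:%S.%f')[:-4])  # 2023-05-17 12:30:45.12
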
Example #4
    def process_files_from_master(self, data_received, tag=None):
        if not tag:
            tag = "[Worker] [process_files_from_master]"

        logger.info("{0}: Analyzing received files: Start.".format(tag))

        try:
            ko_files, zip_path = decompress_files(data_received)
        except Exception as e:
            logger.error("{}: Error decompressing files from master: {}".format(tag, str(e)))
            raise e

        if ko_files:
            logger.info("{0}: Analyzing received files: Missing: {1}. Shared: {2}. Extra: {3}. ExtraValid: {4}".format(tag, len(ko_files['missing']), len(ko_files['shared']), len(ko_files['extra']), len(ko_files['extra_valid'])))
            logger.debug2("{0}: Received cluster_control.json: {1}".format(tag, ko_files))
        else:
            raise Exception("cluster_control.json not included in received zip file.")

        logger.info("{0}: Analyzing received files: End.".format(tag))

        # Update files
        if ko_files['extra_valid']:
            logger.info("{0}: Master requires some worker files. Sending.".format(tag))
            if not "SyncExtraValidFilesThread" in set(map(lambda x: type(x).__name__, threading.enumerate())):
                req_files_thread = SyncExtraValidFilesThread(self, self.stopper, ko_files['extra_valid'])
                req_files_thread.start()
            else:
                logger.warning("{}: The last master's file request is in progress. Rejecting this request.".format(tag))

        if not ko_files['shared'] and not ko_files['missing'] and not ko_files['extra']:
            logger.info("{0}: Worker meets integrity checks. No actions.".format(tag))
            sync_result = True
        else:
            logger.info("{0}: Worker does not meet integrity checks. Actions required.".format(tag))

            logger.info("{0}: Updating files: Start.".format(tag))
            sync_result = self._update_master_files_in_worker(ko_files, zip_path, tag)
            logger.info("{0}: Updating files: End.".format(tag))

        # Remove temporary zip file directory
        shutil.rmtree(zip_path)

        return sync_result
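
The guard above rejects a new extra-valid sync when a SyncExtraValidFilesThread is already alive, by scanning the class names of all running threads. A standalone sketch of that dedup check; the thread class here is a stand-in for the real sync thread:

    import threading
    import time

    class SyncExtraValidFilesThread(threading.Thread):  # stand-in for the real sync thread
        def run(self):
            time.sleep(1)  # simulate a sync in progress

    t = SyncExtraValidFilesThread()
    t.start()

    running = {type(th).__name__ for th in threading.enumerate()}
    if "SyncExtraValidFilesThread" in running:
        print("The last master's file request is in progress. Rejecting this request.")
    t.join()
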
Example #5
File: worker.py  Project: prtkgpta/wazuh
    async def process_files_from_master(self, name: str,
                                        file_received: asyncio.Event):
        await asyncio.wait_for(file_received.wait(),
                               timeout=self.cluster_items['intervals']
                               ['communication']['timeout_receiving_file'])

        received_filename = self.sync_tasks[name].filename
        if received_filename == 'Error':
            self.logger.info(
                "Stopping synchronization process: worker files weren't correctly received."
            )
            return
        zip_path = None
        try:
            logger = self.task_loggers['Integrity']
            logger.info("Analyzing received files: Start.")

            ko_files, zip_path = cluster.decompress_files(received_filename)
            logger.info(
                "Analyzing received files: Missing: {}. Shared: {}. Extra: {}. ExtraValid: {}"
                .format(len(ko_files['missing']), len(ko_files['shared']),
                        len(ko_files['extra']), len(ko_files['extra_valid'])))

            # Update files
            if ko_files['extra_valid']:
                logger.info("Master requires some worker files.")
                asyncio.create_task(
                    self.sync_extra_valid(ko_files['extra_valid']))

            if not ko_files['shared'] and not ko_files['missing'] and not ko_files['extra']:
                logger.info("Worker meets integrity checks. No actions.")
            else:
                logger.info(
                    "Worker does not meet integrity checks. Actions required.")
                logger.info("Updating files: Start.")
                self.update_master_files_in_worker(ko_files, zip_path)
                logger.info("Updating files: End.")
        finally:
            # zip_path is only bound if decompression succeeded.
            if zip_path:
                shutil.rmtree(zip_path)
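
Here sync_extra_valid is scheduled fire-and-forget with asyncio.create_task. The asyncio documentation recommends keeping a reference to such tasks, because the event loop holds only weak references and an otherwise-unreferenced task can be garbage-collected before it finishes. A hedged sketch of that precaution; background_tasks and schedule_sync are illustrative names, not part of the snippets above:

    import asyncio

    background_tasks = set()  # illustrative holder for in-flight tasks

    async def sync_extra_valid(files):
        await asyncio.sleep(0)  # placeholder for the real upload work

    def schedule_sync(files):
        task = asyncio.create_task(sync_extra_valid(files))
        background_tasks.add(task)                        # keep a strong reference
        task.add_done_callback(background_tasks.discard)  # drop it once finished

    async def main():
        schedule_sync(['queue/agent-groups/001'])
        await asyncio.gather(*background_tasks)

    asyncio.run(main())
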
Example #6
File: master.py  Project: sqills/wazuh
    def process_integrity_from_worker(self,
                                      worker_name,
                                      data_received,
                                      cluster_control_key,
                                      cluster_control_subkey,
                                      tag=None):
        if not tag:
            tag = "[Master] [process_integrity_from_worker]"

        # Extract received data
        logger.info("{0}: Analyzing worker integrity: Start.".format(tag))

        try:
            json_file, zip_dir_path = decompress_files(data_received)
        except Exception as e:
            logger.error("{0}: Error decompressing data: {1}".format(
                tag, str(e)))
            raise e

        if json_file:
            master_files_from_worker = json_file['master_files']
        else:
            raise Exception(
                "cluster_control.json not included in received zip file")

        logger.info(
            "{0}: Analyzing worker integrity: Received {1} files to check.".
            format(tag, len(master_files_from_worker)))

        logger.info("{0}: Analyzing worker integrity: Checking files.".format(tag))

        # Get master files
        master_files = self.server.get_integrity_control()

        # Compare
        worker_files_ko = compare_files(master_files, master_files_from_worker)

        agent_groups_to_merge = {
            key: fnmatch.filter(values.keys(), '*/agent-groups/*')
            for key, values in worker_files_ko.items()
        }
        merged_files = {
            key: merge_agent_info(merge_type="agent-groups",
                                  files=values,
                                  file_type="-" + key,
                                  time_limit_seconds=0)
            for key, values in agent_groups_to_merge.items()
        }

        for ko, merged in zip(worker_files_ko.items(),
                              agent_groups_to_merge.items()):
            ko_type, ko_files = ko
            if ko_type == "extra" or "extra_valid":
                continue
            _, merged_filenames = merged
            for m in merged_filenames:
                del ko_files[m]
            n_files, merged_file = merged_files[ko_type]
            if n_files > 0:
                ko_files[merged_file] = {
                    'cluster_item_key': '/queue/agent-groups/',
                    'merged': True
                }

        # Save info for healthcheck
        self.manager.set_worker_status(worker_id=self.name,
                                       key=cluster_control_key,
                                       subkey=cluster_control_subkey,
                                       subsubkey="missing",
                                       status=len(worker_files_ko['missing']))
        self.manager.set_worker_status(worker_id=self.name,
                                       key=cluster_control_key,
                                       subkey=cluster_control_subkey,
                                       subsubkey="shared",
                                       status=len(worker_files_ko['shared']))
        self.manager.set_worker_status(worker_id=self.name,
                                       key=cluster_control_key,
                                       subkey=cluster_control_subkey,
                                       subsubkey="extra",
                                       status=len(worker_files_ko['extra']))
        self.manager.set_worker_status(worker_id=self.name,
                                       key=cluster_control_key,
                                       subkey=cluster_control_subkey,
                                       subsubkey="extra_valid",
                                       status=len(
                                           worker_files_ko['extra_valid']))
        # ---

        # Remove tmp directory created when zip file was received
        shutil.rmtree(zip_dir_path)

        # Step 3: KO files
        if len(list(filter(lambda x: x == {},
                           worker_files_ko.values()))) == len(worker_files_ko):
            logger.info(
                "{0}: Analyzing worker integrity: Files checked. There are no KO files."
                .format(tag))

            ko_files = False
            data_for_worker = None

        else:
            logger.info(
                "{0}: Analyzing worker integrity: Files checked. There are KO files."
                .format(tag))

            # Compress data: master files (only KO shared and missing)
            logger.debug(
                "{0}: Analyzing worker integrity: Files checked. Compressing KO files."
                .format(tag))

            master_files_paths = list(worker_files_ko['shared'])
            master_files_paths.extend(worker_files_ko['missing'])

            compressed_data = compress_files(worker_name, master_files_paths,
                                             worker_files_ko)

            logger.debug(
                "{0}: Analyzing worker integrity: Files checked. KO files compressed."
                .format(tag))

            ko_files = True
            data_for_worker = compressed_data

        logger.info("{0}: Analyzing worker integrity: End.".format(tag))

        return ko_files, data_for_worker
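
The "no KO files" test above counts how many category dicts equal {} and compares the count against the number of categories. An equivalent and more direct formulation is all(); a small demonstration under the same assumed layout:

    worker_files_ko = {'missing': {}, 'shared': {}, 'extra': {}, 'extra_valid': {}}

    # Formulation used in the snippet:
    no_ko = len(list(filter(lambda x: x == {}, worker_files_ko.values()))) == len(worker_files_ko)

    # Equivalent, more idiomatic check:
    assert no_ko == all(not files for files in worker_files_ko.values())
    print(no_ko)  # True -> worker meets integrity checks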