Example 1
    def send_extra_valid_files_to_master(self, files, reason=None, tag=None):
        """Compress the given extra valid files, merging agent-groups files into one, and return the zip path."""
        if not tag:
            tag = "[Worker] [ReqFiles   ]"

        logger.info("{}: Start. Reason: '{}'.".format(tag, reason))

        master_node = self.config['nodes'][0]  # Now, we only have 1 node: the master

        logger.info("{0}: Master found: {1}.".format(tag, master_node))

        agent_groups_to_merge = set(fnmatch.filter(files.keys(), '*/agent-groups/*'))
        if agent_groups_to_merge:
            n_files, merged_file = merge_agent_info(merge_type='agent-groups',
                                                    files=agent_groups_to_merge,
                                                    time_limit_seconds=0)
            for ag in agent_groups_to_merge:
                del files[ag]

            if n_files:
                files.update({merged_file: {'merged': True,
                                            'merge_name': merged_file,
                                            'merge_type': 'agent-groups',
                                            'cluster_item_key': '/queue/agent-groups/'}})

        compressed_data_path = compress_files(self.name, files, {'worker_files': files})

        return compressed_data_path
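
For illustration, a minimal self-contained sketch of the dict rewrite performed above. The sample paths and the merged-file name are hypothetical, and merge_agent_info's return shape (file count, merged path) is inferred from the call in the snippet.

    # Hypothetical stand-in for the rewrite in send_extra_valid_files_to_master:
    # individual agent-groups entries are dropped and replaced by one merged entry.
    files = {
        'queue/agent-groups/001': {'md5': 'aaa'},        # hypothetical paths
        'queue/agent-groups/002': {'md5': 'bbb'},
        'etc/shared/default/merged.mg': {'md5': 'ccc'},  # untouched by the merge
    }

    agent_groups_to_merge = {k for k in files if '/agent-groups/' in k}
    for path in agent_groups_to_merge:
        del files[path]

    merged_file = 'queue/cluster/agent-groups.merged'    # hypothetical merged path
    files[merged_file] = {'merged': True,
                          'merge_name': merged_file,
                          'merge_type': 'agent-groups',
                          'cluster_item_key': '/queue/agent-groups/'}

    print(sorted(files))  # the merged entry plus the non-agent-groups file
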
Example 2
    def send_integrity_to_master(self, reason=None, tag=None):
        """Gather the status of the master's files into a control JSON and return it compressed."""
        if not tag:
            tag = "[Client] [Integrity]"

        logger.info("{0}: Reason: '{1}'".format(tag, reason))

        master_node = self.config['nodes'][0]  # Now, we only have 1 node: the master

        logger.info("{0}: Master found: {1}.".format(tag, master_node))

        logger.info("{0}: Gathering files.".format(tag))

        master_files = get_files_status('master')
        cluster_control_json = {
            'master_files': master_files,
            'client_files': None
        }

        logger.info("{0}: Gathered files: {1}.".format(
            tag, len(cluster_control_json['master_files'])))

        logger.debug("{0}: Compressing files.".format(tag))
        # Compress data: control json
        compressed_data_path = compress_files('client', self.name, None,
                                              cluster_control_json)

        logger.debug("{0}: Files compressed.".format(tag))

        return compressed_data_path
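
For reference, a hypothetical instance of the control JSON assembled above; the per-file metadata returned by get_files_status is not visible in this snippet, so the md5 field is an assumption (the value shown is the md5 of an empty file).

    cluster_control_json = {
        'master_files': {
            # hypothetical entry; real metadata comes from get_files_status('master')
            'etc/shared/default/merged.mg': {'md5': 'd41d8cd98f00b204e9800998ecf8427e'},
        },
        'client_files': None  # the integrity sync carries master file metadata only
    }
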
Example 3
    async def sync_integrity(self, task_name: str, received_file: asyncio.Event):
        """Wait for the worker's integrity zip, compare it against the master's files and send back any KO files."""
        logger = self.task_loggers['Integrity']

        self.sync_integrity_status['date_start_master'] = str(datetime.now())

        logger.info("Waiting to receive zip file from worker")
        await asyncio.wait_for(received_file.wait(),
                               timeout=self.cluster_items['intervals']['communication']['timeout_receiving_file'])
        received_filename = self.sync_tasks[task_name].filename
        if received_filename == 'Error':
            logger.info("Stopping synchronization process: worker files weren't correctly received.")
            return
        logger.debug("Received file from worker: '{}'".format(received_filename))

        files_checksums, decompressed_files_path = await cluster.decompress_files(received_filename)
        logger.info("Analyzing worker integrity: Received {} files to check.".format(len(files_checksums)))

        # classify files in shared, missing, extra and extra valid.
        worker_files_ko, counts = cluster.compare_files(self.server.integrity_control, files_checksums, self.name)

        # health check
        self.sync_integrity_status['total_files'] = counts
        shutil.rmtree(decompressed_files_path)

        if not functools.reduce(operator.add, map(len, worker_files_ko.values())):
            logger.info("Analyzing worker integrity: Files checked. There are no KO files.")
            result = await self.send_request(command=b'sync_m_c_ok', data=b'')
        else:
            logger.info("Analyzing worker integrity: Files checked. There are KO files.")

            # Compress data: master files (only KO shared and missing)
            logger.debug("Analyzing worker integrity: Files checked. Compressing KO files.")
            master_files_paths = worker_files_ko['shared'].keys() | worker_files_ko['missing'].keys()
            compressed_data = cluster.compress_files(self.name, master_files_paths, worker_files_ko)

            try:
                logger.info("Analyzing worker integrity: Files checked. KO files compressed.")
                task_name = await self.send_request(command=b'sync_m_c', data=b'')
                if task_name.startswith(b'Error'):
                    logger.error(task_name.decode())
                    return task_name

                result = await self.send_file(compressed_data)
            finally:
                os.unlink(compressed_data)

            if result.startswith(b'Error'):
                self.logger.error("Error sending files information: {}".format(result.decode()))
                result = await self.send_request(command=b'sync_m_c_e', data=task_name + b' ' + b'Error')
            else:
                result = await self.send_request(command=b'sync_m_c_e',
                                                 data=task_name + b' ' + compressed_data.replace(common.ossec_path, '').encode())

            if result.startswith(b'Error'):
                self.logger.error(result.decode())

        self.sync_integrity_status['date_end_master'] = str(datetime.now())
        self.sync_integrity_free = True
        logger.info("Finished integrity synchronization.")
        return result
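
The "no KO files" branch above hinges on summing the sizes of the four buckets returned by cluster.compare_files. A minimal self-contained sketch of that test, with a hypothetical classification dict:

    import functools
    import operator

    # Hypothetical output of cluster.compare_files: four buckets of path -> metadata.
    worker_files_ko = {'missing': {}, 'shared': {}, 'extra': {}, 'extra_valid': {}}

    # A zero total means the worker is fully in sync with the master.
    total_ko = functools.reduce(operator.add, map(len, worker_files_ko.values()))
    print("in sync" if total_ko == 0 else "{} KO files".format(total_ko))
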
Example 4
    async def sync(self):
        """
        Starts synchronization process with the master and sends necessary information
        """
        result = await self.worker.send_request(command=self.cmd+b'_p', data=b'')
        if result.startswith(b'Error'):
            self.logger.error('Error asking for permission: {}'.format(result.decode()))
            return
        elif result == b'False':
            self.logger.info("Master didn't grant permission to synchronize")
            return
        else:
            self.logger.info("Permission to synchronize granted.")

        self.logger.info("Compressing files")
        compressed_data_path = cluster.compress_files(name=self.worker.name, list_path=self.files_to_sync,
                                                      cluster_control_json=self.checksums)
        try:
            task_id = await self.worker.send_request(command=self.cmd, data=b'')

            self.logger.info("Sending compressed file to master")
            result = await self.worker.send_file(filename=compressed_data_path)
        finally:
            os.unlink(compressed_data_path)
        if result.startswith(b'Error'):
            self.logger.error("Error sending files information: {}".format(result.decode()))
            result = await self.worker.send_request(command=self.cmd+b'_e', data=task_id + b' ' + b'Error')
        else:
            self.logger.info("Worker files sent to master.")
            result = await self.worker.send_request(
                command=self.cmd+b'_e', data=task_id + b' ' + compressed_data_path.replace(common.ossec_path, '').encode())

        if result.startswith(b'Error'):
            self.logger.error(result.decode())
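
The try/finally around send_file above guarantees the temporary archive is removed whether or not the transfer succeeds. A generic sketch of the same pattern, with send_file as a hypothetical callable rather than the worker's real method:

    import os
    import tempfile

    def send_with_cleanup(send_file, payload: bytes) -> bytes:
        # Write the payload to a temporary zip, send it, and always unlink it,
        # mirroring the try/finally in sync() above.
        fd, path = tempfile.mkstemp(suffix='.zip')
        with os.fdopen(fd, 'wb') as f:
            f.write(payload)
        try:
            return send_file(path)
        finally:
            os.unlink(path)
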
Example 5
    def send_client_files_to_master(self, reason=None, tag=None):
        """Compress the client's status files; return the zip path, or None if there is nothing to send."""
        data_for_master = None

        if not tag:
            tag = "[Client] [AgentInfo]"

        logger.info("{0}: Start. Reason: '{1}'".format(tag, reason))

        master_node = self.config['nodes'][0]  # Now, we only have 1 node: the master

        logger.info("{0}: Master found: {1}.".format(tag, master_node))

        logger.info("{0}: Gathering files.".format(tag))

        client_files = get_files_status('client', get_md5=False)
        cluster_control_json = {
            'master_files': {},
            'client_files': client_files
        }

        # Getting client file paths: agent-info, agent-groups.
        client_files_paths = client_files.keys()

        logger.debug("{0}: Files gathered: {1}.".format(
            tag, len(client_files_paths)))

        if len(client_files_paths) != 0:
            logger.info("{0}: There are agent-info files to send.".format(tag))

            # Compress data: client files + control json
            compressed_data_path = compress_files('client', self.name,
                                                  client_files_paths,
                                                  cluster_control_json)

            data_for_master = compressed_data_path

        else:
            logger.info(
                "{0}: There are no agent-info files to send.".format(tag))

        return data_for_master
Example 6
    def process_integrity_from_worker(self,
                                      worker_name,
                                      data_received,
                                      cluster_control_key,
                                      cluster_control_subkey,
                                      tag=None):
        """Check a worker's integrity files against the master's and return any KO files compressed."""
        if not tag:
            tag = "[Master] [process_integrity_from_worker]"

        # Extract received data
        logger.info("{0}: Analyzing worker integrity: Start.".format(tag))

        try:
            json_file, zip_dir_path = decompress_files(data_received)
        except Exception as e:
            logger.error("{0}: Error decompressing data: {1}".format(
                tag, str(e)))
            raise

        if json_file:
            master_files_from_worker = json_file['master_files']
        else:
            raise Exception(
                "cluster_control.json not included in received zip file")

        logger.info(
            "{0}: Analyzing worker integrity: Received {1} files to check.".
            format(tag, len(master_files_from_worker)))

        logger.info("{0}: Analyzing worker integrity: Checking files.".format(
            tag, len(master_files_from_worker)))

        # Get master files
        master_files = self.server.get_integrity_control()

        # Compare
        worker_files_ko = compare_files(master_files, master_files_from_worker)

        agent_groups_to_merge = {
            key: fnmatch.filter(values.keys(), '*/agent-groups/*')
            for key, values in worker_files_ko.items()
        }
        merged_files = {
            key: merge_agent_info(merge_type="agent-groups",
                                  files=values,
                                  file_type="-" + key,
                                  time_limit_seconds=0)
            for key, values in agent_groups_to_merge.items()
        }

        for ko_type, ko_files in worker_files_ko.items():
            # Skip 'extra' and 'extra_valid': only missing/shared files are merged.
            if ko_type in ("extra", "extra_valid"):
                continue
            for m in agent_groups_to_merge[ko_type]:
                del ko_files[m]
            n_files, merged_file = merged_files[ko_type]
            if n_files > 0:
                ko_files[merged_file] = {
                    'cluster_item_key': '/queue/agent-groups/',
                    'merged': True
                }

        # Save info for healthcheck
        for subsubkey in ("missing", "shared", "extra", "extra_valid"):
            self.manager.set_worker_status(worker_id=self.name,
                                           key=cluster_control_key,
                                           subkey=cluster_control_subkey,
                                           subsubkey=subsubkey,
                                           status=len(worker_files_ko[subsubkey]))

        # Remove tmp directory created when zip file was received
        shutil.rmtree(zip_dir_path)

        # Step 3: KO files
        if all(not files for files in worker_files_ko.values()):
            logger.info(
                "{0}: Analyzing worker integrity: Files checked. There are no KO files."
                .format(tag))

            ko_files = False
            data_for_worker = None

        else:
            logger.info(
                "{0}: Analyzing worker integrity: Files checked. There are KO files."
                .format(tag))

            # Compress data: master files (only KO shared and missing)
            logger.debug(
                "{0}: Analyzing worker integrity: Files checked. Compressing KO files."
                .format(tag))

            master_files_paths = list(worker_files_ko['shared'])
            master_files_paths.extend(worker_files_ko['missing'])

            compressed_data = compress_files(worker_name, master_files_paths,
                                             worker_files_ko)

            logger.debug(
                "{0}: Analyzing worker integrity: Files checked. KO files compressed."
                .format(tag))

            ko_files = True
            data_for_worker = compressed_data

        logger.info("{0}: Analyzing worker integrity: End.".format(tag))

        return ko_files, data_for_worker
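
A hypothetical caller illustrating the return contract of process_integrity_from_worker: a boolean KO flag plus an optional path to the compressed KO files. The handler object, the transport helper and the control keys are stand-ins, not part of the snippet above.

    def reply_to_worker(handler, payload, send_zip_to_worker):
        # 'handler' stands in for the master-side object exposing
        # process_integrity_from_worker; 'send_zip_to_worker' for the transport.
        ko_files, data_for_worker = handler.process_integrity_from_worker(
            worker_name='worker-01',
            data_received=payload,
            cluster_control_key='last_sync_integrity',
            cluster_control_subkey='total_files')
        if ko_files:
            send_zip_to_worker(data_for_worker)  # forward the compressed KO files
        # otherwise the worker is already in sync and nothing is sent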