Example #1
    def send_extra_valid_files_to_master(self, files, reason=None, tag=None):
        if not tag:
            tag = "[Worker] [ReqFiles   ]"

        logger.info("{}: Start. Reason: '{}'.".format(tag, reason))

        master_node = self.config['nodes'][0]  # Now, we only have 1 node: the master

        logger.info("{0}: Master found: {1}.".format(tag, master_node))

        agent_groups_to_merge = set(fnmatch.filter(files.keys(), '*/agent-groups/*'))
        if agent_groups_to_merge:
            n_files, merged_file = merge_agent_info(merge_type='agent-groups',
                                                    files=agent_groups_to_merge,
                                                    time_limit_seconds=0)
            for ag in agent_groups_to_merge:
                del files[ag]

            if n_files:
                files.update({merged_file: {'merged': True,
                                            'merge_name': merged_file,
                                            'merge_type': 'agent-groups',
                                            'cluster_item_key': '/queue/agent-groups/'}})

        compressed_data_path = compress_files(self.name, files, {'worker_files': files})

        return compressed_data_path
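
For reference, a minimal sketch of the shape the files argument appears to take, inferred from the filtering and merge logic above. The concrete paths and metadata fields are illustrative assumptions, not the real cluster schema:

# Hypothetical input for send_extra_valid_files_to_master (illustrative only).
# Keys matching '*/agent-groups/*' are merged into a single file before
# compression; all other entries are forwarded unchanged.
files = {
    'queue/agent-groups/001': {'md5': '...', 'mod_time': '...'},      # merged
    'queue/agent-groups/002': {'md5': '...', 'mod_time': '...'},      # merged
    'queue/agent-info/agent1-any': {'md5': '...', 'mod_time': '...'}  # kept as-is
}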
Example #2
    async def sync_extra_valid(self, extra_valid: Dict):
        extra_valid_logger = self.task_loggers["Extra valid"]
        try:
            before = time.time()
            self.logger.debug("Starting to send extra valid files")
            # TODO: Add support for more extra valid file types if ever added
            n_files, merged_file = cluster.merge_agent_info(
                merge_type='agent-groups',
                files=extra_valid.keys(),
                time_limit_seconds=0,
                node_name=self.name)
            if n_files:
                files_to_sync = {
                    merged_file: {
                        'merged': True,
                        'merge_type': 'agent-groups',
                        'merge_name': merged_file,
                        'cluster_item_key': '/queue/agent-groups/'
                    }
                }
                my_worker = SyncWorker(cmd=b'sync_e_w_m',
                                       files_to_sync=files_to_sync,
                                       checksums=files_to_sync,
                                       logger=extra_valid_logger,
                                       worker=self)
                await my_worker.sync()
            after = time.time()
            self.logger.debug2("Time synchronizing extra valid files: {} s".format(after - before))
        except Exception as e:
            extra_valid_logger.error("Error synchronizing extra valid files: {}".format(e))
            res = await self.send_request(command=b'sync_e_w_m_r',
                                          data=str(e).encode())
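
A minimal sketch of how this coroutine might be driven from an async context. The worker instance and the extra_valid mapping are assumptions for illustration; in the real cluster they come from the connection framework:

import asyncio

async def main(worker, extra_valid):
    # Kick off the extra valid synchronization and wait for completion.
    await worker.sync_extra_valid(extra_valid)

# Assuming a connected worker instance:
# asyncio.run(main(worker, {'queue/agent-groups/001': None}))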
Example #3
def test_merge_agent_info(stat_mock, listdir_mock):
    """
    Tests merge agent info function
    """
    stat_mock.return_value.st_mtime = time.time()
    stat_mock.return_value.st_size = len(agent_info)

    with patch('builtins.open', mock_open(read_data=agent_info)) as m:
        cluster.merge_agent_info('agent-info', 'worker1')
        m.assert_any_call('/var/ossec/queue/cluster/worker1/agent-info.merged',
                          'wb')
        m.assert_any_call('/var/ossec/queue/agent-info/agent1-any', 'rb')
        m.assert_any_call('/var/ossec/queue/agent-info/agent2-any', 'rb')
        handle = m()
        expected = (f'{len(agent_info)} agent1-any '
                    f'{datetime.utcfromtimestamp(stat_mock.return_value.st_mtime)}\n').encode() + agent_info
        handle.write.assert_any_call(expected)
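
The decorators are cut off in this snippet. Given the parameter order (stat_mock, listdir_mock) and the agent files the test expects to be opened, the patch stack was likely the following; the exact patch targets are an assumption:

from unittest.mock import patch

# With stacked @patch decorators, the bottom-most patch is injected as the
# first test argument, so os.stat must be patched closest to the function.
@patch('os.listdir', return_value=['agent1-any', 'agent2-any'])
@patch('os.stat')
def test_merge_agent_info(stat_mock, listdir_mock):
    ...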
Example #4
    def process_integrity_from_worker(self,
                                      worker_name,
                                      data_received,
                                      cluster_control_key,
                                      cluster_control_subkey,
                                      tag=None):
        if not tag:
            tag = "[Master] [process_integrity_from_worker]"

        # Extract received data
        logger.info("{0}: Analyzing worker integrity: Start.".format(tag))

        try:
            json_file, zip_dir_path = decompress_files(data_received)
        except Exception as e:
            logger.error("{0}: Error decompressing data: {1}".format(
                tag, str(e)))
            raise e

        if json_file:
            master_files_from_worker = json_file['master_files']
        else:
            raise Exception(
                "cluster_control.json not included in received zip file")

        logger.info(
            "{0}: Analyzing worker integrity: Received {1} files to check.".
            format(tag, len(master_files_from_worker)))

        logger.info("{0}: Analyzing worker integrity: Checking files.".format(
            tag, len(master_files_from_worker)))

        # Get master files
        master_files = self.server.get_integrity_control()

        # Compare
        worker_files_ko = compare_files(master_files, master_files_from_worker)

        agent_groups_to_merge = {
            key: fnmatch.filter(values.keys(), '*/agent-groups/*')
            for key, values in worker_files_ko.items()
        }
        merged_files = {
            key: merge_agent_info(merge_type="agent-groups",
                                  files=values,
                                  file_type="-" + key,
                                  time_limit_seconds=0)
            for key, values in agent_groups_to_merge.items()
        }

        for ko, merged in zip(worker_files_ko.items(),
                              agent_groups_to_merge.items()):
            ko_type, ko_files = ko
            # Skip extra and extra_valid categories: only missing/shared are merged
            if ko_type in ("extra", "extra_valid"):
                continue
            _, merged_filenames = merged
            for m in merged_filenames:
                del ko_files[m]
            n_files, merged_file = merged_files[ko_type]
            if n_files > 0:
                ko_files[merged_file] = {
                    'cluster_item_key': '/queue/agent-groups/',
                    'merged': True
                }

        # Save info for healthcheck
        for subsubkey in ('missing', 'shared', 'extra', 'extra_valid'):
            self.manager.set_worker_status(worker_id=self.name,
                                           key=cluster_control_key,
                                           subkey=cluster_control_subkey,
                                           subsubkey=subsubkey,
                                           status=len(worker_files_ko[subsubkey]))

        # Remove tmp directory created when zip file was received
        shutil.rmtree(zip_dir_path)

        # Step 3: KO files
        if all(not files for files in worker_files_ko.values()):
            logger.info(
                "{0}: Analyzing worker integrity: Files checked. There are no KO files."
                .format(tag))

            ko_files = False
            data_for_worker = None

        else:
            logger.info(
                "{0}: Analyzing worker integrity: Files checked. There are KO files."
                .format(tag))

            # Compress data: master files (only KO shared and missing)
            logger.debug(
                "{0}: Analyzing worker integrity: Files checked. Compressing KO files."
                .format(tag))

            master_files_paths = list(worker_files_ko['shared'])
            master_files_paths.extend(worker_files_ko['missing'])

            compressed_data = compress_files(worker_name, master_files_paths,
                                             worker_files_ko)

            logger.debug(
                "{0}: Analyzing worker integrity: Files checked. KO files compressed."
                .format(tag))

            ko_files = True
            data_for_worker = compressed_data

        logger.info("{0}: Analyzing worker integrity: End.".format(tag))

        return ko_files, data_for_worker
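
For orientation, a hedged sketch of the structure compare_files appears to return, inferred from the four category keys this method reads. The example paths and metadata are illustrative assumptions:

# Hypothetical shape of worker_files_ko (illustrative only). Each category
# maps file paths to metadata; only 'missing' and 'shared' entries are
# compressed and sent back to the worker.
worker_files_ko = {
    'missing': {'queue/agent-groups/003': {'cluster_item_key': '/queue/agent-groups/'}},
    'shared': {'etc/shared/default/merged.mg': {'cluster_item_key': '/etc/shared/'}},
    'extra': {},
    'extra_valid': {}
}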