async def sync_integrity(self, task_name: str, received_file: asyncio.Event):
    """Handle an integrity synchronization started by a worker.

    Wait for the worker's zip file, compare its checksums against the master's
    integrity control and, if there are KO files, compress and send back the
    shared and missing ones.
    """
    logger = self.task_loggers['Integrity']
    self.sync_integrity_status['date_start_master'] = str(datetime.now())

    logger.info("Waiting to receive zip file from worker")
    await asyncio.wait_for(received_file.wait(),
                           timeout=self.cluster_items['intervals']['communication']['timeout_receiving_file'])
    received_filename = self.sync_tasks[task_name].filename
    if received_filename == 'Error':
        logger.info("Stopping synchronization process: worker files weren't correctly received.")
        return
    logger.debug("Received file from worker: '{}'".format(received_filename))

    files_checksums, decompressed_files_path = await cluster.decompress_files(received_filename)
    logger.info("Analyzing worker integrity: Received {} files to check.".format(len(files_checksums)))

    # Classify files in shared, missing, extra and extra valid.
    worker_files_ko, counts = cluster.compare_files(self.server.integrity_control, files_checksums, self.name)

    # Health check
    self.sync_integrity_status['total_files'] = counts
    shutil.rmtree(decompressed_files_path)

    if not functools.reduce(operator.add, map(len, worker_files_ko.values())):
        logger.info("Analyzing worker integrity: Files checked. There are no KO files.")
        result = await self.send_request(command=b'sync_m_c_ok', data=b'')
    else:
        logger.info("Analyzing worker integrity: Files checked. There are KO files.")

        # Compress data: master files (only KO shared and missing).
        logger.debug("Analyzing worker integrity: Files checked. Compressing KO files.")
        master_files_paths = worker_files_ko['shared'].keys() | worker_files_ko['missing'].keys()
        compressed_data = cluster.compress_files(self.name, master_files_paths, worker_files_ko)

        try:
            logger.info("Analyzing worker integrity: Files checked. KO files compressed.")
            task_name = await self.send_request(command=b'sync_m_c', data=b'')
            if task_name.startswith(b'Error'):
                logger.error(task_name.decode())
                return task_name

            result = await self.send_file(compressed_data)
        finally:
            os.unlink(compressed_data)

        if result.startswith(b'Error'):
            self.logger.error("Error sending files information: {}".format(result.decode()))
            result = await self.send_request(command=b'sync_m_c_e', data=task_name + b' ' + b'Error')
        else:
            result = await self.send_request(command=b'sync_m_c_e',
                                             data=task_name + b' ' + compressed_data.replace(common.ossec_path, '').encode())

        if result.startswith(b'Error'):
            self.logger.error(result.decode())

    self.sync_integrity_status['date_end_master'] = str(datetime.now())
    self.sync_integrity_free = True
    logger.info("Finished integrity synchronization.")
    return result
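
# Illustrative sketch (hypothetical helper, not part of the module above): how the
# sync_m_c_e payload sent after a successful send_file is assembled. '/var/ossec'
# stands in for common.ossec_path; the task name and zip path are made-up example values.
def build_sync_m_c_e_payload(task_name: bytes, compressed_data: str, ossec_path: str = '/var/ossec') -> bytes:
    """Join the task id and the zip path relative to the installation root.

    >>> build_sync_m_c_e_payload(b'sync_m_c_17', '/var/ossec/queue/cluster/worker1/files.zip')
    b'sync_m_c_17 /queue/cluster/worker1/files.zip'
    """
    # The worker receives the path without the installation prefix so it can
    # resolve the file against its own installation root.
    return task_name + b' ' + compressed_data.replace(ossec_path, '').encode()
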
def process_integrity_from_worker(self, worker_name, data_received, cluster_control_key,
                                  cluster_control_subkey, tag=None):
    """Check a worker's integrity against the master files.

    Decompress the data received from the worker, classify the files, merge the
    KO agent-groups files and return whether there are KO files along with the
    compressed data to send back to the worker.
    """
    if not tag:
        tag = "[Master] [process_integrity_from_worker]"

    # Extract received data
    logger.info("{0}: Analyzing worker integrity: Start.".format(tag))

    try:
        json_file, zip_dir_path = decompress_files(data_received)
    except Exception as e:
        logger.error("{0}: Error decompressing data: {1}".format(tag, str(e)))
        raise e

    if json_file:
        master_files_from_worker = json_file['master_files']
    else:
        raise Exception("cluster_control.json not included in received zip file")

    logger.info("{0}: Analyzing worker integrity: Received {1} files to check.".format(
        tag, len(master_files_from_worker)))
    logger.info("{0}: Analyzing worker integrity: Checking files.".format(tag))

    # Get master files
    master_files = self.server.get_integrity_control()

    # Compare
    worker_files_ko = compare_files(master_files, master_files_from_worker)

    # Merge the KO agent-groups files of each category into a single file so they
    # are sent as one item instead of one file per agent.
    agent_groups_to_merge = {key: fnmatch.filter(values.keys(), '*/agent-groups/*')
                             for key, values in worker_files_ko.items()}
    merged_files = {key: merge_agent_info(merge_type="agent-groups", files=values,
                                          file_type="-" + key, time_limit_seconds=0)
                    for key, values in agent_groups_to_merge.items()}

    for ko, merged in zip(worker_files_ko.items(), agent_groups_to_merge.items()):
        ko_type, ko_files = ko
        if ko_type in ("extra", "extra_valid"):
            continue
        _, merged_filenames = merged
        # Replace the individual agent-groups entries with the merged file.
        for m in merged_filenames:
            del ko_files[m]
        n_files, merged_file = merged_files[ko_type]
        if n_files > 0:
            ko_files[merged_file] = {'cluster_item_key': '/queue/agent-groups/', 'merged': True}

    # Save info for healthcheck
    self.manager.set_worker_status(worker_id=self.name, key=cluster_control_key,
                                   subkey=cluster_control_subkey, subsubkey="missing",
                                   status=len(worker_files_ko['missing']))
    self.manager.set_worker_status(worker_id=self.name, key=cluster_control_key,
                                   subkey=cluster_control_subkey, subsubkey="shared",
                                   status=len(worker_files_ko['shared']))
    self.manager.set_worker_status(worker_id=self.name, key=cluster_control_key,
                                   subkey=cluster_control_subkey, subsubkey="extra",
                                   status=len(worker_files_ko['extra']))
    self.manager.set_worker_status(worker_id=self.name, key=cluster_control_key,
                                   subkey=cluster_control_subkey, subsubkey="extra_valid",
                                   status=len(worker_files_ko['extra_valid']))

    # Remove tmp directory created when zip file was received
    shutil.rmtree(zip_dir_path)

    # Step 3: KO files
    if len(list(filter(lambda x: x == {}, worker_files_ko.values()))) == len(worker_files_ko):
        logger.info("{0}: Analyzing worker integrity: Files checked. There are no KO files.".format(tag))
        ko_files = False
        data_for_worker = None
    else:
        logger.info("{0}: Analyzing worker integrity: Files checked. There are KO files.".format(tag))

        # Compress data: master files (only KO shared and missing)
        logger.debug("{0}: Analyzing worker integrity: Files checked. Compressing KO files.".format(tag))
        master_files_paths = [item for item in worker_files_ko['shared']]
        master_files_paths.extend([item for item in worker_files_ko['missing']])
        compressed_data = compress_files(worker_name, master_files_paths, worker_files_ko)
        logger.debug("{0}: Analyzing worker integrity: Files checked. KO files compressed.".format(tag))

        ko_files = True
        data_for_worker = compressed_data

    logger.info("{0}: Analyzing worker integrity: End.".format(tag))
    return ko_files, data_for_worker
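
# Illustrative sketch (not part of the module above): the shape of the classification
# dict both functions operate on and their two "no KO files" checks, which agree with
# each other. The categories (shared/missing/extra/extra_valid) come from the code
# above; the empty contents here are made-up example values.
import functools
import operator

worker_files_ko = {'shared': {}, 'missing': {}, 'extra': {}, 'extra_valid': {}}

# sync_integrity: the total number of KO entries across every category is zero.
no_ko_total = not functools.reduce(operator.add, map(len, worker_files_ko.values()))

# process_integrity_from_worker: every category dict is empty.
no_ko_all_empty = len(list(filter(lambda x: x == {}, worker_files_ko.values()))) == len(worker_files_ko)

assert no_ko_total and no_ko_all_empty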