def process_request(self, command, data):
    """Route a request coming from the master to its worker-side handler.

    Returns a (result, payload) tuple; unrecognized commands are delegated
    to the base WorkerHandler implementation.
    """
    logger.debug("[Worker] [Request-R ]: '{0}'.".format(command))

    # Keep-alive echo: reply with the decoded payload.
    if command == 'echo-m':
        return 'ok-m ', data.decode()

    # Master pushed its file set: process it on a dedicated thread.
    if command == 'sync_m_c':
        sync_thread = WorkerProcessMasterFiles(manager_handler=self,
                                               filename=data,
                                               stopper=self.stopper)
        sync_thread.start()
        return 'ack', self.set_worker_thread(command, sync_thread, data)

    # Integrity check finished on the master (success or failure): in both
    # cases unblock whoever is waiting on the integrity event.
    if command == 'sync_m_c_ok':
        logger.info("[Worker] [Integrity ]: The master has verified that the integrity is right.")
        self.integrity_received_and_processed.set()
        return 'ack', "Thanks2!"
    if command == 'sync_m_c_err':
        logger.info("[Worker] [Integrity ]: The master was not able to verify the integrity.")
        self.integrity_received_and_processed.set()
        return 'ack', "Thanks!"

    # Report the merged status of master- and worker-owned files as JSON.
    if command == 'file_status':
        merged = get_files_status('master', get_md5=True)
        merged.update(get_files_status('worker', get_md5=True))
        return 'json', json.dumps(merged)

    # Anything else is handled by the parent class.
    return WorkerHandler.process_request(self, command, data)
def process_request(self, command, data):
    """Dispatch a request received from the master to the matching worker action.

    Returns a (result, payload) tuple; unknown commands fall through to
    ClientHandler.process_request.
    """
    logger.debug("[Worker ] [Request-R ]: '{0}'.".format(command))
    if command == 'echo-m':
        # Keep-alive: echo the decoded payload back.
        return 'ok-m ', data.decode()
    elif command == 'sync_m_c':
        # Master sent its files: process them asynchronously in a thread
        # registered under this command name.
        cmf_thread = WorkerProcessMasterFiles(manager_handler=self,
                                              filename=data,
                                              stopper=self.stopper)
        cmf_thread.start()
        return 'ack', self.set_worker_thread(command, cmf_thread, data)
    elif command == 'sync_m_c_ok':
        # Integrity verified by the master: release waiters on the event.
        logger.info(
            "[Worker ] [Integrity ]: The master has verified that the integrity is right."
        )
        self.integrity_received_and_processed.set()
        return 'ack', "Thanks2!"
    elif command == 'sync_m_c_err':
        # Integrity check failed on the master: the event is set anyway so the
        # waiting sync task does not block forever.
        logger.info(
            "[Worker ] [Integrity ]: The master was not able to verify the integrity."
        )
        self.integrity_received_and_processed.set()
        return 'ack', "Thanks!"
    elif command == 'file_status':
        # Merge master- and worker-owned file statuses (worker entries win on
        # key collisions) and return them as JSON.
        master_files = get_files_status('master', get_md5=True)
        worker_files = get_files_status('worker', get_md5=True)
        files = master_files
        files.update(worker_files)
        return 'json', json.dumps(files)
    elif command == 'string':
        # Receive a fragmented string on a dedicated thread.
        string_sender_thread = FragmentedStringReceiverWorker(
            manager_handler=self, stopper=self.stopper)
        string_sender_thread.start()
        return 'ack', self.set_worker_thread(command, string_sender_thread)
    elif command == 'dapi':
        # Queue a distributed-API request; 'None ' marks no originating worker.
        self.manager.add_api_request('None ' + data.decode())
        return 'ack', 'Request is being processed'
    elif command == "dapi_res":
        # Receive a fragmented distributed-API response addressed to a worker.
        string_receiver = FragmentedAPIResponseReceiver(
            manager_handler=self, stopper=self.stopper,
            worker_id=data.decode())
        string_receiver.start()
        return 'ack', self.set_worker_thread(command, string_receiver)
    elif command == 'err-is':
        # Payload is "<worker_id> <error message>": forward the error over the
        # internal socket to the named worker.
        worker_id, err_msg = data.decode().split(' ', 1)
        self.isocket_handler.send_request(command=command, data=err_msg,
                                          worker_name=worker_id)
        return 'ack', 'thanks'
    else:
        # Not a worker-specific command: defer to the base handler.
        return ClientHandler.process_request(self, command, data)
async def sync_agent_info(self):
    """
    Asynchronous task that is started when the worker connects to the master.
    It starts an agent-info synchronization process every
    self.cluster_items['intervals']['worker']['sync_files'] seconds.

    :return: None
    """
    agent_info_logger = self.task_loggers["Agent info"]
    while True:
        try:
            # Only sync while the connection to the master is up.
            if self.connected:
                before = time.time()
                agent_info_logger.info("Starting to send agent status files")
                worker_files = cluster.get_files_status('worker', self.name,
                                                        get_md5=False)
                # agent-info files serve both as payload and checksums here.
                await SyncWorker(cmd=b'sync_a_w_m', files_to_sync=worker_files,
                                 checksums=worker_files,
                                 logger=agent_info_logger, worker=self).sync()
                after = time.time()
                agent_info_logger.debug2(
                    "Time synchronizing agent statuses: {} s".format(after - before))
        except Exception as e:
            agent_info_logger.error(
                "Error synchronizing agent status files: {}".format(e))
            # Notify the master of the failure; the reply is not needed
            # (the previously unused `res` local has been removed).
            await self.send_request(command=b'sync_a_w_m_r',
                                    data=str(e).encode())
        await asyncio.sleep(
            self.cluster_items['intervals']['worker']['sync_files'])
async def sync_integrity(self):
    """
    Asynchronous task that is started when the worker connects to the master.
    It starts an integrity synchronization process every
    self.cluster_items['intervals']['worker']['sync_integrity'] seconds.

    :return: None
    """
    integrity_logger = self.task_loggers["Integrity"]
    while True:
        try:
            # Only sync while the connection to the master is up.
            if self.connected:
                before = time.time()
                # Integrity sync sends no files, only checksums of the
                # master-owned file set as seen by this worker.
                await SyncWorker(cmd=b'sync_i_w_m', files_to_sync={},
                                 checksums=cluster.get_files_status('master',
                                                                    self.name),
                                 logger=integrity_logger, worker=self).sync()
                after = time.time()
                integrity_logger.debug(
                    "Time synchronizing integrity: {} s".format(after - before))
        except Exception as e:
            integrity_logger.error(
                "Error synchronizing integrity: {}".format(e))
            # Notify the master of the failure; the reply is not needed
            # (the previously unused `res` local has been removed).
            await self.send_request(command=b'sync_i_w_m_r',
                                    data=str(e).encode())
        await asyncio.sleep(
            self.cluster_items['intervals']['worker']['sync_integrity'])
def send_integrity_to_master(self, reason=None, tag=None):
    """Gather the master-file status and compress it for the master node.

    Returns the path of the compressed archive containing the control JSON.
    """
    tag = tag or "[Client] [Integrity]"
    logger.info("{0}: Reason: '{1}'".format(tag, reason))

    master_node = self.config['nodes'][0]  # Now, we only have 1 node: the master
    logger.info("{0}: Master found: {1}.".format(tag, master_node))

    logger.info("{0}: Gathering files.".format(tag))
    status_of_master_files = get_files_status('master')
    control_json = {
        'master_files': status_of_master_files,
        'client_files': None
    }
    logger.info("{0}: Gathered files: {1}.".format(
        tag, len(control_json['master_files'])))

    # Compress data: control json
    logger.debug("{0}: Compressing files.".format(tag))
    archive_path = compress_files('client', self.name, None, control_json)
    logger.debug("{0}: Files compressed.".format(tag))

    return archive_path
async def file_status_update(self):
    """Periodically recompute the master's file-integrity control dict.

    Runs forever, refreshing self.integrity_control every
    self.cluster_items['intervals']['master']['recalculate_integrity'] seconds.
    """
    task_logger = self.setup_task_logger("File integrity")
    while True:
        task_logger.debug("Calculating")
        try:
            node_name = self.configuration['node_name']
            self.integrity_control = cluster.get_files_status('master', node_name)
        except Exception as e:
            # Log and keep the previous integrity_control; retry next cycle.
            task_logger.error("Error calculating file integrity: {}".format(e))
        task_logger.debug("Calculated.")
        interval = self.cluster_items['intervals']['master']['recalculate_integrity']
        await asyncio.sleep(interval)
def run(self):
    """Integrity-control thread loop: refresh the master's file status until
    stopped via the shared stopper event or the running flag."""
    while not self.stopper.is_set() and self.running:
        logger.debug("[Master] [IntegrityControl] Calculating.")
        try:
            # Recompute and publish the current master file status.
            self.master.set_integrity_control(get_files_status('master'))
        except Exception as exc:
            logger.error("[Master] [IntegrityControl] Error: {}".format(str(exc)))
        logger.debug("[Master] [IntegrityControl] Calculated.")
        self.sleep(self.interval)
def send_client_files_to_master(self, reason=None, tag=None):
    """Gather local agent-info files and compress them for the master node.

    :param reason: Free-text reason for the sync, used only for logging.
    :param tag: Log-line prefix; defaults to "[Client] [AgentInfo]".
    :return: Path of the compressed archive, or None when there is nothing
             to send.
    """
    data_for_master = None
    if not tag:
        tag = "[Client] [AgentInfo]"
    logger.info("{0}: Start. Reason: '{1}'".format(tag, reason))

    master_node = self.config['nodes'][0]  # Now, we only have 1 node: the master
    logger.info("{0}: Master found: {1}.".format(tag, master_node))

    logger.info("{0}: Gathering files.".format(tag))
    client_files = get_files_status('client', get_md5=False)
    cluster_control_json = {
        'master_files': {},
        'client_files': client_files
    }
    # Getting client file paths: agent-info, agent-groups.
    client_files_paths = client_files.keys()
    logger.debug("{0}: Files gathered: {1}.".format(
        tag, len(client_files_paths)))

    # Idiomatic emptiness check (was: len(client_files_paths) != 0).
    if client_files_paths:
        logger.info("{0}: There are agent-info files to send.".format(tag))
        # Compress data: client files + control json
        compressed_data_path = compress_files('client', self.name,
                                              client_files_paths,
                                              cluster_control_json)
        data_for_master = compressed_data_path
    else:
        logger.info(
            "{0}: There are no agent-info files to send.".format(tag))

    return data_for_master
def process_request(self, command, data):
    """Handle a request arriving on the master's internal socket and return
    a serialized ['ok'|'err', json] response.

    Supported commands: get_files, get_nodes, get_agents, get_health, sync.
    """
    logger.debug("[Transport-I] Forwarding request to master of cluster '{0}' - '{1}'".format(command, data))
    serialized_response = ""
    data = data.decode()

    if command == 'get_files':
        # Payload: "<file_list>%--%<node_list>%--%..."; only the node list is used.
        split_data = data.split('%--%', 2)
        node_list = ast.literal_eval(split_data[1]) if split_data[1] else None
        get_my_files = False
        response = {}

        if node_list and len(node_list) > 0:  # Selected nodes
            for node in node_list:
                if node == read_config()['node_name']:
                    # Local node: gathered directly below, not over the wire.
                    get_my_files = True
                    continue
                node_file = self.manager.send_request(client_name=node,
                                                      command='file_status', data='')
                if node_file.split(' ', 1)[0] == 'err':  # Error response
                    response.update({node: node_file.split(' ', 1)[1]})
                else:
                    response.update({node: json.loads(node_file.split(' ', 1)[1])})
        else:  # Broadcast
            get_my_files = True
            node_file = list(self.manager.send_request_broadcast(command='file_status'))
            for node, node_data in node_file:
                try:
                    response.update({node: json.loads(node_data.split(' ', 1)[1])})
                except ValueError:  # json.loads will raise a ValueError
                    response.update({node: node_data.split(' ', 1)[1]})

        if get_my_files:
            my_files = get_files_status('master', get_md5=True)
            my_files.update(get_files_status('client', get_md5=True))
            response.update({read_config()['node_name']: my_files})

        # Filter files
        if node_list and len(response):
            response = {node: response.get(node) for node in node_list}

        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'get_nodes':
        response = {name: info['info'] for name, info
                    in self.manager.get_connected_clients().items()}
        cluster_config = read_config()
        # Include this (master) node in the listing.
        response.update({cluster_config['node_name']:
                         {"name": cluster_config['node_name'],
                          "ip": cluster_config['nodes'][0],
                          "type": "master",
                          "version": __version__}})
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'get_agents':
        # Payload: six '%--%'-separated fields, 'None' meaning "not set".
        split_data = data.split('%--%', 5)
        filter_status = split_data[0] if split_data[0] != 'None' else None
        filter_nodes = split_data[1] if split_data[1] != 'None' else None
        offset = split_data[2] if split_data[2] != 'None' else None
        limit = split_data[3] if split_data[3] != 'None' else None
        sort = split_data[4] if split_data[4] != 'None' else None
        search = split_data[5] if split_data[5] != 'None' else None
        response = get_agents_status(filter_status, filter_nodes, offset,
                                     limit, sort, search)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'get_health':
        node_list = data if data != 'None' else None
        response = self.manager.get_healthcheck(node_list)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'sync':
        command = "req_sync_m_c"
        split_data = data.split(' ', 1)
        node_list = ast.literal_eval(split_data[0]) if split_data[0] else None
        if node_list:
            # BUGFIX: the response dict was rebuilt on every iteration, so
            # only the last node's reply was returned. Accumulate all replies.
            response = {node: self.manager.send_request(client_name=node,
                                                        command=command,
                                                        data="")
                        for node in node_list}
            serialized_response = ['ok', json.dumps(response)]
        else:
            response = list(self.manager.send_request_broadcast(command=command,
                                                                data=data))
            serialized_response = ['ok', json.dumps({node: node_data
                                                     for node, node_data in response})]
        return serialized_response

    else:
        return ['err', json.dumps({'err': "Received an unknown command '{}'".format(command)})]
def process_request(self, command, data):
    """Handle a request arriving on the master's internal socket (legacy,
    Python-2 variant using iteritems) and return a serialized response.

    Supported commands: get_files, get_nodes, get_agents, get_health, sync;
    any other command is forwarded to one node or broadcast to all.
    """
    logger.debug(
        "[Transport-I] Forwarding request to master of cluster '{0}' - '{1}'"
        .format(command, data))
    serialized_response = ""

    if command == 'get_files':
        split_data = data.split('%--%', 2)
        # NOTE: file_list is parsed but not used by this implementation.
        file_list = ast.literal_eval(
            split_data[0]) if split_data[0] else None
        node_list = ast.literal_eval(
            split_data[1]) if split_data[1] else None
        get_my_files = False
        response = {}

        if node_list and len(node_list) > 0:  # Selected nodes
            for node in node_list:
                if node == read_config()['node_name']:
                    # Local node: gathered directly below, not over the wire.
                    get_my_files = True
                    continue
                node_file = self.manager.send_request(
                    client_name=node, command='file_status', data='')
                if node_file.split(' ', 1)[0] == 'err':  # Error response
                    response.update({node: node_file.split(' ', 1)[1]})
                else:
                    response.update(
                        {node: json.loads(node_file.split(' ', 1)[1])})
        else:  # Broadcast
            get_my_files = True
            node_file = list(
                self.manager.send_request_broadcast(command='file_status'))
            for node, node_data in node_file:
                try:
                    response.update(
                        {node: json.loads(node_data.split(' ', 1)[1])})
                except ValueError:  # BUGFIX: was a bare except; only a JSON
                    # decode failure marks an error response.
                    response.update({node: node_data.split(' ', 1)[1]})

        if get_my_files:
            my_files = get_files_status('master', get_md5=True)
            my_files.update(get_files_status('client', get_md5=True))
            response.update({read_config()['node_name']: my_files})

        # Filter files
        if node_list and len(response):
            response = {node: response.get(node) for node in node_list}

        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'get_nodes':
        split_data = data.split(' ', 1)
        node_list = ast.literal_eval(
            split_data[0]) if split_data[0] else None
        response = {
            name: info['info']
            for name, info in self.manager.get_connected_clients().iteritems()
        }
        cluster_config = read_config()
        # Include this (master) node in the listing.
        response.update({
            cluster_config['node_name']: {
                "name": cluster_config['node_name'],
                "ip": cluster_config['nodes'][0],
                "type": "master"
            }
        })
        if node_list:
            response = {
                node: info
                for node, info in response.iteritems() if node in node_list
            }
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'get_agents':
        split_data = data.split('%--%', 1)
        filter_status = split_data[0] if split_data[0] != 'None' else None
        filter_nodes = split_data[1] if split_data[1] != 'None' else None
        response = get_agents_status(filter_status, filter_nodes)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'get_health':
        response = self.manager.get_healthcheck()
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response

    elif command == 'sync':
        command = "req_sync_m_c"
        split_data = data.split(' ', 1)
        node_list = ast.literal_eval(
            split_data[0]) if split_data[0] else None
        if node_list:
            # BUGFIX: the response dict was rebuilt on every iteration, so
            # only the last node's reply was returned. Accumulate all replies.
            response = {
                node: self.manager.send_request(client_name=node,
                                                command=command,
                                                data="")
                for node in node_list
            }
            serialized_response = ['ok', json.dumps(response)]
        else:
            response = list(
                self.manager.send_request_broadcast(command=command,
                                                    data=data))
            serialized_response = [
                'ok',
                json.dumps({node: node_data for node, node_data in response})
            ]
        return serialized_response

    else:
        # Forward any other command to one node ("<host> <payload>") or to
        # every connected node ("all <payload>").
        split_data = data.split(' ', 1)
        host = split_data[0]
        data = split_data[1] if len(split_data) > 1 else None
        if host == 'all':
            response = list(
                self.manager.send_request_broadcast(command=command,
                                                    data=data))
            serialized_response = [
                'ok',
                json.dumps({node: node_data for node, node_data in response})
            ]
        else:
            response = self.manager.send_request(client_name=host,
                                                 command=command,
                                                 data=data)
            if response:
                # BUGFIX: the original referenced an undefined `node_response`
                # (NameError). Reconstructed as the "<type> <payload>" split
                # used by every other response in this handler.
                node_response = response.split(' ', 1)
                type_response = node_response[0]
                response = node_response[1]
                if type_response == "err":
                    serialized_response = {"err": response}
                else:
                    serialized_response = response
        return serialized_response