def get_healthcheck(self):
    """
    Build a healthcheck report of the cluster.

    Gathers the info/status of every connected client node and adds an entry
    for this (master) node built from the cluster configuration.

    :return: Dict with the number of connected nodes and a per-node mapping
             of their information.
    """
    # Fix: dict.iteritems() only exists in Python 2 and raises AttributeError
    # on Python 3; items() behaves equivalently on both and matches the rest
    # of this module.
    clients_info = {
        name: {"info": data['info'], "status": data['status']}
        for name, data in self.get_connected_clients().items()
    }

    cluster_config = read_config()
    # The master is not part of the connected-clients map, so add it by hand.
    clients_info.update({
        cluster_config['node_name']: {
            "info": {
                "name": cluster_config['node_name'],
                "ip": cluster_config['nodes'][0],
                "type": "master"
            }
        }
    })

    # clients + master (the master entry was just added above)
    health_info = {"n_connected_nodes": len(clients_info), "nodes": clients_info}
    return health_info
def process_request(self, command, data):
    """
    Dispatch a request received through the cluster's local (internal) socket.

    :param command: Command name received from the local client.
    :param data: Raw payload (bytes) accompanying the command.
    :return: A [response_type, serialized_payload] pair, or a (cmd, payload)
             tuple for 'dapi_forward'.
    """
    logger.debug("[Master ] [LocalServer ] Request received in cluster local server: '{0}' - '{1}'".format(command, data))
    # Payload arrives as bytes over the socket.
    data = data.decode()
    if command == 'get_nodes':
        # Info of every connected worker plus an entry for the master itself.
        response = {name:data['info'] for name,data in self.server.manager.get_connected_workers().items()}
        cluster_config = read_config()
        response.update({cluster_config['node_name']:{"name": cluster_config['node_name'], "ip": cluster_config['nodes'][0], "type": "master", "version":__version__}})
        serialized_response = ['json', json.dumps(response)]
        return serialized_response
    elif command == 'get_health':
        # Payload format: '<prefix> <node_list>'; 'None' means "no filter".
        _, data = data.split(' ', 1)
        node_list = data if data != 'None' else None
        response = self.server.manager.get_healthcheck(node_list)
        serialized_response = ['json', json.dumps(response)]
        return serialized_response
    elif command == 'get_config':
        response = self.server.manager.get_configuration()
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'dapi':
        # Distributed API request: payload is '<id> <json>'.
        return ['json', dapi.distribute_function(json.loads(data.split(' ', 1)[1]))]
    elif command == 'dapi_forward':
        # Forward an API request to another node: '<worker_id> <node> <json>'.
        worker_id, node_name, input_json = data.split(' ', 2)
        # Remote responses come back as '<cmd> <payload>'.
        res_cmd, res = self.server.manager.send_request(worker_name=node_name, command='dapi', data=worker_id + ' ' + input_json).split(' ', 1)
        # Wrap error payloads in JSON so the caller always gets valid JSON.
        return res_cmd, res if res_cmd != 'err' else json.dumps({'err': res})
    else:
        # Unknown commands are delegated to the generic handler.
        return InternalSocketHandler.process_request(self, command, data)
def test_checking_configuration(read_config):
    """Verify invalid cluster configurations make validation raise error 3004."""
    with patch('wazuh.cluster.cluster.get_ossec_conf') as mocked_get_conf:
        # Feed the (invalid) fixture configuration to read_config().
        mocked_get_conf.return_value = read_config.copy()
        with pytest.raises(WazuhException, match=r'.* 3004 .*'):
            cluster.check_cluster_config(cluster.read_config())
def process_request(self, command, data):
    """
    Dispatch a request sent by a connected client node to this master.

    :param command: Protocol command name.
    :param data: Raw payload (bytes); each branch decodes it as needed.
    :return: A (response_type, payload) tuple or a serialized response list.
    """
    logger.debug("[Master] [{0}] [Request-R]: '{1}'.".format(self.name, command))
    if command == 'echo-c':  # Echo
        return 'ok-c ', data.decode()
    elif command == 'sync_i_c_m_p':
        # Client asks whether the master is free to receive integrity data.
        result = self.manager.get_client_status(client_id=self.name, key='sync_integrity_free')
        return 'ack', str(result)
    elif command == 'sync_ai_c_mp':
        # Same check for agent-info synchronization.
        return 'ack', str(self.manager.get_client_status(client_id=self.name, key='sync_agentinfo_free'))
    elif command == 'sync_ev_c_mp':
        # Same check for extra-valid file synchronization.
        return 'ack', str(self.manager.get_client_status(client_id=self.name, key='sync_extravalid_free'))
    elif command == 'sync_i_c_m':  # Client syncs integrity
        data = data.decode()
        pci_thread = ProcessClientIntegrity(manager=self.manager, manager_handler=self, filename=data, stopper=self.stopper)
        pci_thread.start()
        # data will contain the filename
        return 'ack', self.set_worker(command, pci_thread, data)
    elif command == 'sync_ai_c_m':
        # Client sends agent-info files; processed by a dedicated thread.
        data = data.decode()
        mcf_thread = ProcessClientFiles(manager_handler=self, filename=data, stopper=self.stopper)
        mcf_thread.start()
        # data will contain the filename
        return 'ack', self.set_worker(command, mcf_thread, data)
    elif command == 'sync_ev_c_m':
        # Client sends extra-valid files; processed by a dedicated thread.
        data = data.decode()
        mcf_thread = ProcessExtraValidFiles(manager_handler=self, filename=data, stopper=self.stopper)
        mcf_thread.start()
        return 'ack', self.set_worker(command, mcf_thread, data)
    elif command == 'get_nodes':
        data = data.decode()
        # Connected clients' info plus an entry for this master node.
        response = {name:data['info'] for name,data in self.server.get_connected_clients().items()}
        cluster_config = read_config()
        response.update({cluster_config['node_name']:{"name": cluster_config['node_name'], "ip": cluster_config['nodes'][0], "type": "master", "version": __version__}})
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_health':
        filter_nodes = data.decode()
        response = self.manager.get_healthcheck(filter_nodes)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_agents':
        data = data.decode()
        # Payload: six '%--%'-separated fields; the literal string 'None'
        # stands for "no value" in each position.
        split_data = data.split('%--%', 5)
        filter_status = split_data[0] if split_data[0] != 'None' else None
        filter_nodes = split_data[1] if split_data[1] != 'None' else None
        offset = split_data[2] if split_data[2] != 'None' else None
        limit = split_data[3] if split_data[3] != 'None' else None
        sort = split_data[4] if split_data[4] != 'None' else None
        search = split_data[5] if split_data[5] != 'None' else None
        response = get_agents_status(filter_status, filter_nodes, offset, limit, sort, search)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    else:  # Non-master requests
        return ServerHandler.process_request(self, command, data)
def test_read_empty_configuration():
    """When ossec.conf has no cluster section, the defaults must be returned."""
    with patch('wazuh.cluster.cluster.get_ossec_conf') as mocked_get_conf:
        # Error 1106 simulates a missing cluster section in ossec.conf.
        mocked_get_conf.side_effect = WazuhException(1106)
        parsed = cluster.read_config()
        # read_config() turns 'disabled' into a bool; map it back to the
        # yes/no strings used by the expected default configuration.
        parsed['disabled'] = 'yes' if parsed['disabled'] else 'no'
        assert parsed == default_cluster_configuration
def check_cluster_status():
    """
    Ensure the cluster is enabled, correctly configured and running.

    :raises WazuhException(3013): cluster disabled or configuration missing.
    :raises WazuhException(3012): cluster service not running.
    """
    config = read_config()
    enabled = bool(config) and config['disabled'] != 'yes'
    if not enabled:
        raise WazuhException(3013)

    # Raises on a malformed configuration before checking the service state.
    check_cluster_config(config)

    if get_status_json()["running"] != "yes":
        raise WazuhException(3012)
def worker_main(test_name, test_size, filepath): c_config = cluster.read_config() # Test threads if test_name == "test0": # just connect print("Just connect") while True: print("Test: just listening") worker = WorkerManager(c_config) asyncore.loop(timeout=1, map=worker.map) time.sleep(1) asyncore.loop(timeout=1, map=worker.map) elif test_name == 'test1': worker = WorkerManager(c_config) c_test_thread = WorkerTest('trehad 0', test_name, test_size) c_test_thread.start() c_test_thread.setworker(worker) asyncore.loop(timeout=1, map=worker.map) elif test_name == 'test2': worker = WorkerManager(c_config) thread_pool = [] for i in range(10): thread_pool.append( WorkerTest('thread {0}'.format(i), 'test1', test_size)) thread_pool[i].setworker(worker) for i in range(10): thread_pool[i].start() asyncore.loop(timeout=1, map=worker.map) elif test_name == 'testf': worker = WorkerManager(c_config) thread_test = WorkerTest(t_name='thread0', test_name='testf', filepath=filepath) thread_test.setworker(worker) thread_test.start() asyncore.loop(timeout=1, map=worker.map) elif test_name == 'testc': worker = WorkerManager(c_config) thread_test = WorkerTest(t_name='thread0', test_name='testc') thread_test.setworker(worker) thread_test.start() asyncore.loop(timeout=1, map=worker.map) else: print("No test selected") print("Exiting...")
def __init__(self, command: bytes, data: bytes, wait_for_complete: bool):
    """
    Class constructor for a local client that forwards one request to the cluster.

    :param command: Command to send.
    :param data: Payload to send.
    :param wait_for_complete: Whether to enable timeout or not.
    """
    super().__init__(configuration=cluster.read_config(), enable_ssl=False, performance_test=0, concurrency_test=0, file='', string=0, logger=logging.getLogger(), tag="Local Client", cluster_items=cluster.get_cluster_items())
    # Filled in once the forwarded request gets a response.
    self.request_result = None
    self.command = command
    self.data = data
    self.wait_for_complete = wait_for_complete
    # asyncio protocol/transport, set when the connection is established.
    self.protocol = None
    self.transport = None
def get_healthcheck(self, filter_nodes=None):
    """
    Build the cluster healthcheck report.

    :param filter_nodes: Optional collection of node names. When given, only
        those nodes appear in the report (the total count is unaffected).
    :return: Dict with the total number of connected nodes (clients + master)
        and per-node information.
    """
    connected = self.get_connected_clients()
    # The total always counts every connected client plus the master,
    # regardless of any node filter.
    total_nodes = len(connected) + 1

    nodes = {}
    for client_name, client_data in connected.items():
        if filter_nodes and client_name not in filter_nodes:
            continue
        nodes[client_name] = {"info": dict(client_data['info']),
                              "status": client_data['status']}

    config = read_config()
    master_name = config['node_name']
    if not filter_nodes or master_name in filter_nodes:
        nodes[master_name] = {"info": {"name": master_name,
                                       "ip": config['nodes'][0],
                                       "version": __version__,
                                       "type": "master"}}

    # Attach the number of active agents reporting to each node.
    for node_name in nodes:
        nodes[node_name]["info"]["n_active_agents"] = Agent.get_agents_overview(
            status='Active', node_name=node_name)['totalItems']

    return {"n_connected_nodes": total_nodes, "nodes": nodes}
def test_read_configuration(read_config):
    """Parsed cluster configuration must keep user values and fill defaults."""
    with patch('wazuh.cluster.cluster.get_ossec_conf') as mocked_get_conf:
        mocked_get_conf.return_value = read_config.copy()
        parsed = cluster.read_config()
        # read_config() turns 'disabled' into a bool; map it back to yes/no
        # so it can be compared against the raw fixture values.
        parsed['disabled'] = 'yes' if parsed['disabled'] else 'no'
        # Every user-provided value must be preserved verbatim.
        for key, expected in read_config.items():
            assert parsed[key] == expected
        # values not present in the read user configuration will be filled with default values
        if 'disabled' not in read_config and read_config != {}:
            default_cluster_configuration['disabled'] = 'no'
        for key in default_cluster_configuration.keys() - read_config.keys():
            assert parsed[key] == default_cluster_configuration[key]
def get_healthcheck(self, filter_nodes=None):
    """
    Build the cluster healthcheck report (master + connected workers).

    :param filter_nodes: Optional collection of node names; when given, only
        those nodes are included in the report.
    :return: Dict with the number of connected nodes and per-node information.
    """
    # Copy info/status so later mutations (agent counts, date formatting)
    # don't touch the manager's internal state.
    workers_info = {
        name: {
            "info": dict(data['info']),
            "status": data['status'].copy()
        }
        for name, data in self.get_connected_workers().items()
        if not filter_nodes or name in filter_nodes
    }
    # NOTE(review): this counts only the *filtered* workers plus the master,
    # even if the master itself is excluded by the filter below — confirm
    # whether the total should instead reflect all connected nodes.
    n_connected_nodes = len(workers_info) + 1  # workers + master
    cluster_config = read_config()
    if not filter_nodes or cluster_config['node_name'] in filter_nodes:
        workers_info.update({
            cluster_config['node_name']: {
                "info": {
                    "name": cluster_config['node_name'],
                    "ip": cluster_config['nodes'][0],
                    "version": __version__,
                    "type": "master"
                }
            }
        })

    # Get active agents by node and format last keep alive date format
    for node_name in workers_info.keys():
        workers_info[node_name]["info"][
            "n_active_agents"] = Agent.get_agents_overview(
                filters={
                    'status': 'Active',
                    'node_name': node_name
                })['totalItems']
        # Workers report last_keep_alive as a float timestamp; render it as a
        # human-readable datetime string. (Assumes worker info dicts carry a
        # 'type' key — TODO confirm against the worker handshake payload.)
        if workers_info[node_name]['info'][
                'type'] != 'master' and isinstance(
                    workers_info[node_name]['status']['last_keep_alive'],
                    float):
            workers_info[node_name]['status']['last_keep_alive'] = str(
                datetime.fromtimestamp(
                    workers_info[node_name]['status']['last_keep_alive']))

    health_info = {
        "n_connected_nodes": n_connected_nodes,
        "nodes": workers_info
    }
    return health_info
def master_main(test_name, test_size):
    """
    Entry point for the master side of the cluster test harness.

    :param test_name: Which scenario to run ('test0', 'test1', 'test2', 'testm').
    :param test_size: Size parameter forwarded to the test threads.
    """
    # Read config
    c_config = cluster.read_config()
    # Initiate master
    master = MasterManager(c_config)
    # Internal socket so local tools can query the master while it runs.
    internal_socket_thread = InternalSocketThread("c-internal")
    internal_socket_thread.start()
    internal_socket_thread.setmanager(master, MasterInternalSocketHandler)
    # Test threads
    if test_name == "test0":  # just connect
        print("Test: just listening")
        asyncore.loop(timeout=1, map=master.map)
    elif test_name == 'test1':
        m_test_thread = MasterTest('thread 0', master, test_name, test_size)
        m_test_thread.start()
        # Loop
        asyncore.loop(timeout=1, map=master.map)
        print("loop end")
    elif test_name == 'test2':
        # Ten concurrent 'test1' threads; all created before any is started.
        thread_pool = []
        for i in range(10):
            thread_pool.append(MasterTest('thread {0}'.format(i), master, 'test1', test_size))
        for i in range(10):
            thread_pool[i].start()
        asyncore.loop(timeout=1, map=master.map)
    elif test_name == 'testm':
        m_test_thread = MasterTest('thread0', master, test_name)
        m_test_thread.start()
        asyncore.loop(timeout=1, map=master.map)
    else:
        print("No test selected")
    print("Exiting...")
def __init__(self, command: bytes, data: bytes, wait_for_complete: bool):
    """
    Initialize a local client used to forward a single request to the cluster.

    :param command: Command to send
    :param data: Payload to send
    :param wait_for_complete: Whether to enable timeout or not
    """
    super().__init__(configuration=cluster.read_config(),
                     enable_ssl=False,
                     performance_test=0,
                     concurrency_test=0,
                     file='',
                     string=0,
                     logger=logging.getLogger(),
                     tag="Local Client",
                     cluster_items=cluster.get_cluster_items())
    # The request to forward and its (initially unknown) result.
    self.request_result = None
    self.command = command
    self.data = data
    self.wait_for_complete = wait_for_complete
    # asyncio protocol/transport, populated once the connection is made.
    self.protocol = None
    self.transport = None
def process_request(self, command, data):
    """
    Dispatch a request sent by a connected worker node to this master.

    :param command: Protocol command name.
    :param data: Raw payload (bytes); each branch decodes it as needed.
    :return: A (response_type, payload) tuple or a serialized response list.
    """
    logger.debug("[Master ] [{0}] [Request-R ]: '{1}'.".format(
        self.name, command))
    if command == 'echo-c':  # Echo
        # Echoes double as keep-alives from the worker.
        self.process_keep_alive_from_worker()
        return 'ok-c ', data.decode()
    elif command == 'sync_i_c_m_p':
        # Worker asks whether the master is free to receive integrity data.
        result = self.manager.get_worker_status(worker_id=self.name,
                                                key='sync_integrity_free')
        return 'ack', str(result)
    elif command == 'sync_ai_c_mp':
        # Same check for agent-info synchronization.
        return 'ack', str(
            self.manager.get_worker_status(worker_id=self.name,
                                           key='sync_agentinfo_free'))
    elif command == 'sync_ev_c_mp':
        # Same check for extra-valid file synchronization.
        return 'ack', str(
            self.manager.get_worker_status(worker_id=self.name,
                                           key='sync_extravalid_free'))
    elif command == 'sync_i_c_m':  # Worker syncs integrity
        data = data.decode()
        pci_thread = ProcessWorkerIntegrity(manager=self.manager,
                                            manager_handler=self,
                                            filename=data,
                                            stopper=self.stopper)
        pci_thread.start()
        # data will contain the filename
        return 'ack', self.set_worker_thread(command, pci_thread, data)
    elif command == 'sync_ai_c_m':
        # Worker sends agent-info files; handled by a dedicated thread.
        data = data.decode()
        mcf_thread = ProcessWorkerFiles(manager_handler=self,
                                        filename=data,
                                        stopper=self.stopper)
        mcf_thread.start()
        # data will contain the filename
        return 'ack', self.set_worker_thread(command, mcf_thread, data)
    elif command == 'sync_ev_c_m':
        # Worker sends extra-valid files; handled by a dedicated thread.
        data = data.decode()
        mcf_thread = ProcessExtraValidFiles(manager_handler=self,
                                            filename=data,
                                            stopper=self.stopper)
        mcf_thread.start()
        return 'ack', self.set_worker_thread(command, mcf_thread, data)
    elif command == 'get_nodes':
        data = data.decode()
        # Connected workers' info plus an entry for this master node.
        response = {
            name: data['info']
            for name, data in self.server.get_connected_workers().items()
        }
        cluster_config = read_config()
        response.update({
            cluster_config['node_name']: {
                "name": cluster_config['node_name'],
                "ip": cluster_config['nodes'][0],
                "type": "master",
                "version": __version__
            }
        })
        serialized_response = ['json', json.dumps(response)]
        return serialized_response
    elif command == 'get_health':
        # Payload format: '<prefix> <node_list>'; 'None' means "no filter".
        _, filter_nodes = data.decode().split(' ', 1)
        response = self.manager.get_healthcheck(
            filter_nodes if filter_nodes != 'None' else None)
        serialized_response = ['json', json.dumps(response)]
        return serialized_response
    elif command == 'get_config':
        response = self.manager.get_configuration()
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'string':
        # Incoming fragmented string; a receiver thread reassembles it.
        string_sender_thread = FragmentedStringReceiverMaster(
            manager_handler=self, stopper=self.stopper)
        string_sender_thread.start()
        return 'ack', self.set_worker_thread(command, string_sender_thread)
    elif command == 'dapi':
        # Queue the distributed-API request tagged with this worker's name.
        self.server.add_api_request(self.name + ' ' + data.decode())
        return 'ack', "Request is being processed"
    elif command == "dapi_res":
        # Incoming fragmented API response addressed to a worker.
        string_receiver = FragmentedAPIResponseReceiver(
            manager_handler=self, stopper=self.stopper,
            worker_id=data.decode())
        string_receiver.start()
        return 'ack', self.set_worker_thread(command, string_receiver)
    elif command == 'err-is':
        # Internal-socket error reported by the worker; log and acknowledge.
        logger.debug("{} Internal socket error received: {}".format(
            self.tag, data.decode()))
        return 'ack', 'thanks'
    else:  # Non-master requests
        return ServerHandler.process_request(self, command, data)
def process_request(self, command, data):
    """
    Handle a request received through the internal socket, forwarding it to
    the master manager and/or its connected client nodes.

    :param command: Command name received from the internal socket.
    :param data: String payload accompanying the command.
    :return: Serialized response: usually ['ok', json_payload], or the raw
             forwarded response for unrecognized commands.
    """
    logger.debug(
        "[Transport-I] Forwarding request to master of cluster '{0}' - '{1}'"
        .format(command, data))
    serialized_response = ""
    if command == 'get_files':
        # Payload: '<file_list>%--%<node_list>%--%...'. The first field was
        # parsed into an unused local in the previous revision; only the
        # node list is actually consumed.
        split_data = data.split('%--%', 2)
        node_list = ast.literal_eval(
            split_data[1]) if split_data[1] else None
        get_my_files = False
        response = {}
        if node_list and len(node_list) > 0:  # Selected nodes
            for node in node_list:
                if node == read_config()['node_name']:
                    # This node's own files are gathered locally below.
                    get_my_files = True
                    continue
                node_file = self.manager.send_request(
                    client_name=node, command='file_status', data='')
                # Responses are '<type> <payload>' strings.
                if node_file.split(' ', 1)[0] == 'err':  # Error response
                    response.update({node: node_file.split(' ', 1)[1]})
                else:
                    response.update(
                        {node: json.loads(node_file.split(' ', 1)[1])})
        else:  # Broadcast
            get_my_files = True
            node_file = list(
                self.manager.send_request_broadcast(command='file_status'))
            for node, data in node_file:
                # Narrowed from a bare 'except': only a malformed/error
                # payload can fail here — either json.loads (ValueError)
                # or a payload with no space separator (IndexError).
                try:
                    response.update(
                        {node: json.loads(data.split(' ', 1)[1])})
                except (ValueError, IndexError):  # Error response
                    response.update({node: data.split(' ', 1)[1]})
        if get_my_files:
            my_files = get_files_status('master', get_md5=True)
            my_files.update(get_files_status('client', get_md5=True))
            response.update({read_config()['node_name']: my_files})
        # Filter files
        if node_list and len(response):
            response = {node: response.get(node) for node in node_list}
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_nodes':
        split_data = data.split(' ', 1)
        node_list = ast.literal_eval(
            split_data[0]) if split_data[0] else None
        # items() instead of the Python-2-only iteritems(): equivalent in
        # both interpreter versions and consistent with sibling handlers.
        response = {
            name: data['info']
            for name, data in self.manager.get_connected_clients().items()
        }
        cluster_config = read_config()
        response.update({
            cluster_config['node_name']: {
                "name": cluster_config['node_name'],
                "ip": cluster_config['nodes'][0],
                "type": "master"
            }
        })
        if node_list:
            response = {
                node: info
                for node, info in response.items() if node in node_list
            }
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_agents':
        # Payload: '<status>%--%<nodes>'; 'None' stands for "no filter".
        split_data = data.split('%--%', 1)
        filter_status = split_data[0] if split_data[0] != 'None' else None
        filter_nodes = split_data[1] if split_data[1] != 'None' else None
        response = get_agents_status(filter_status, filter_nodes)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_health':
        response = self.manager.get_healthcheck()
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'sync':
        command = "req_sync_m_c"
        split_data = data.split(' ', 1)
        node_list = ast.literal_eval(
            split_data[0]) if split_data[0] else None
        if node_list:
            # Fix: the previous revision rebound 'response' on every loop
            # iteration, so only the last node's reply was reported.
            # Accumulate one entry per requested node instead.
            response = {
                node: self.manager.send_request(client_name=node,
                                                command=command,
                                                data="")
                for node in node_list
            }
            serialized_response = ['ok', json.dumps(response)]
        else:
            response = list(
                self.manager.send_request_broadcast(command=command,
                                                    data=data))
            serialized_response = [
                'ok', json.dumps({node: data for node, data in response})
            ]
        return serialized_response
    else:
        # Generic forwarding: '<host> <payload>'; 'all' broadcasts.
        split_data = data.split(' ', 1)
        host = split_data[0]
        data = split_data[1] if len(split_data) > 1 else None
        if host == 'all':
            response = list(
                self.manager.send_request_broadcast(command=command,
                                                    data=data))
            serialized_response = [
                'ok', json.dumps({node: data for node, data in response})
            ]
        else:
            response = self.manager.send_request(client_name=host,
                                                 command=command,
                                                 data=data)
            if response:
                # Fix: the previous revision referenced an undefined name
                # 'node_response' here, raising NameError on every non-empty
                # reply. Replies are '<type> <payload>' strings, so split once.
                type_response, response = response.split(' ', 1)
                if type_response == "err":
                    serialized_response = {"err": response}
                else:
                    serialized_response = response
        return serialized_response
# Signals signal(SIGINT, signal_handler) signal(SIGTERM, signal_handler) # Check if it is already running if status()['wazuh-clusterd'] == 'running': clean_exit(reason="wazuh_clusterd is already running", error=True) # Foreground/Daemon if not args.f: res_code = pyDaemon() # Get cluster config try: cluster_config = read_config() except WazuhException as e: clean_exit(reason=str(e), error=True) if not cluster_config or cluster_config['disabled'] == 'yes': clean_exit(reason="Cluster disabled", error=True) # Drop privileges to ossec if not args.r: setgid(common.ossec_gid) seteuid(common.ossec_uid) # Creating pid file create_pid("wazuh-clusterd", getpid()) # Validate config
def process_request(self, command, data):
    """
    Handle a request received through the internal socket, forwarding it to
    the master manager and/or its connected client nodes.

    :param command: Command name received from the internal socket.
    :param data: Raw payload (bytes) accompanying the command.
    :return: Serialized response: ['ok', json] or ['err', json].
    """
    logger.debug("[Transport-I] Forwarding request to master of cluster '{0}' - '{1}'".format(command, data))
    serialized_response = ""
    # Payload arrives as bytes over the socket.
    data = data.decode()
    if command == 'get_files':
        # Payload: '<files>%--%<node_list>%--%...'; only the node list is used.
        split_data = data.split('%--%', 2)
        node_list = ast.literal_eval(split_data[1]) if split_data[1] else None
        get_my_files = False
        response = {}
        if node_list and len(node_list) > 0:  # Selected nodes
            for node in node_list:
                if node == read_config()['node_name']:
                    # This node's own files are gathered locally below.
                    get_my_files = True
                    continue
                node_file = self.manager.send_request(client_name=node, command='file_status', data='')
                # Responses are '<type> <payload>' strings.
                if node_file.split(' ', 1)[0] == 'err':  # Error response
                    response.update({node:node_file.split(' ', 1)[1]})
                else:
                    response.update({node:json.loads(node_file.split(' ',1)[1])})
        else:  # Broadcast to every connected node
            get_my_files = True
            node_file = list(self.manager.send_request_broadcast(command = 'file_status'))
            for node,data in node_file:
                try:
                    response.update({node:json.loads(data.split(' ',1)[1])})
                except ValueError:  # json.loads will raise a ValueError
                    # Keep the raw error payload for this node.
                    response.update({node:data.split(' ',1)[1]})
        if get_my_files:
            my_files = get_files_status('master', get_md5=True)
            my_files.update(get_files_status('client', get_md5=True))
            response.update({read_config()['node_name']:my_files})
        # Filter files: keep only the requested nodes (missing ones map to None).
        if node_list and len(response):
            response = {node: response.get(node) for node in node_list}
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_nodes':
        # Connected clients' info plus an entry for this master node.
        response = {name:data['info'] for name,data in self.manager.get_connected_clients().items()}
        cluster_config = read_config()
        response.update({cluster_config['node_name']:{"name": cluster_config['node_name'], "ip": cluster_config['nodes'][0], "type": "master", "version":__version__}})
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_agents':
        # Payload: six '%--%'-separated fields; 'None' stands for "no value".
        split_data = data.split('%--%', 5)
        filter_status = split_data[0] if split_data[0] != 'None' else None
        filter_nodes = split_data[1] if split_data[1] != 'None' else None
        offset = split_data[2] if split_data[2] != 'None' else None
        limit = split_data[3] if split_data[3] != 'None' else None
        sort = split_data[4] if split_data[4] != 'None' else None
        search = split_data[5] if split_data[5] != 'None' else None
        response = get_agents_status(filter_status, filter_nodes, offset, limit, sort, search)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'get_health':
        node_list = data if data != 'None' else None
        response = self.manager.get_healthcheck(node_list)
        serialized_response = ['ok', json.dumps(response)]
        return serialized_response
    elif command == 'sync':
        # Ask nodes to start a sync; the on-wire command name differs.
        command = "req_sync_m_c"
        split_data = data.split(' ', 1)
        node_list = ast.literal_eval(split_data[0]) if split_data[0] else None
        if node_list:
            # NOTE(review): 'response' is rebound on each iteration, so only
            # the last node's reply ends up in the serialized response —
            # confirm whether replies should be accumulated per node.
            for node in node_list:
                response = {node:self.manager.send_request(client_name=node, command=command, data="")}
            serialized_response = ['ok', json.dumps(response)]
        else:
            response = list(self.manager.send_request_broadcast(command=command, data=data))
            serialized_response = ['ok', json.dumps({node:data for node,data in response})]
        return serialized_response
    else:
        return ['err', json.dumps({'err': "Received an unknown command '{}'".format(command)})]
# Set logger try: debug_mode = configuration.get_internal_options_value( 'wazuh_clusterd', 'debug', 2, 0) or args.debug_level except Exception: debug_mode = 0 # set correct permissions on cluster.log file if os.path.exists('{0}/logs/cluster.log'.format(common.ossec_path)): os.chown('{0}/logs/cluster.log'.format(common.ossec_path), common.ossec_uid, common.ossec_gid) os.chmod('{0}/logs/cluster.log'.format(common.ossec_path), 0o660) main_logger = set_logging(debug_mode) cluster_configuration = cluster.read_config(config_file=args.config_file) if cluster_configuration['disabled']: sys.exit(0) cluster_items = cluster.get_cluster_items() try: cluster.check_cluster_config(cluster_configuration) except Exception as e: main_logger.error(e) sys.exit(1) if args.test_config: sys.exit(0) # clean cluster.clean_up()
    '--health', action='store', nargs='?', const='health', help='Show cluster health')

args = parser.parse_args()

# Verbose output only when -d/--debug is given.
logging.basicConfig(level=logging.DEBUG if args.debug else logging.ERROR,
                    format='%(levelname)s: %(message)s')

# The control tool is useless when the cluster is disabled or stopped.
cluster_status = cluster.get_status_json()
if cluster_status['enabled'] == 'no' or cluster_status['running'] == 'no':
    logging.error("Cluster is not running.")
    sys.exit(1)

cluster_config = cluster.read_config()
cluster.check_cluster_config(config=cluster_config)

# Map the selected CLI action to a printing function and its arguments.
try:
    if args.filter_status and not args.list_agents:
        # --filter-status only makes sense together with --list-agents.
        logging.error("Wrong arguments.")
        parser.print_help()
        sys.exit(1)
    elif args.list_agents:
        my_function, my_args = print_agents, (
            args.filter_status,
            args.filter_node,
        )
    elif args.list_nodes:
        my_function, my_args = print_nodes, (args.filter_node, )
    elif args.health:
# set correct permissions on cluster.log file if os.path.exists('{0}/logs/cluster.log'.format(common.ossec_path)): os.chown('{0}/logs/cluster.log'.format(common.ossec_path), common.ossec_uid, common.ossec_gid) os.chmod('{0}/logs/cluster.log'.format(common.ossec_path), 0o660) # clean cluster.clean_up() # Drop privileges to ossec if not args.root: os.setgid(common.ossec_gid) os.setuid(common.ossec_uid) main_logger = set_logging(args.foreground, debug_mode) cluster_configuration = cluster.read_config() cluster_items = cluster.get_cluster_items() try: cluster.check_cluster_config(cluster_configuration) except Exception as e: main_logger.error(e) sys.exit(0) if cluster_configuration['disabled']: main_logger.info("Cluster disabled. Exiting.") sys.exit(0) pyDaemonModule.create_pid('wazuh-clusterd', os.getpid()) main_function = master_main if cluster_configuration['node_type'] == 'master' else worker_main try: