def test_get_agents_overview_status_olderthan(test_data, status, older_than, totalItems, exception):
    """Test filtering by status and older_than."""
    with patch('sqlite3.connect') as mock_db:
        mock_db.return_value = test_data.global_db
        kwargs = {'filters': {'status': status, 'older_than': older_than},
                  'select': {'fields': ['name', 'id', 'status', 'lastKeepAlive', 'dateAdd']}}

        if exception is None:
            agents = Agent.get_agents_overview(**kwargs)
            assert agents['totalItems'] == totalItems
        else:
            with pytest.raises(WazuhException, match=f'.* {exception} .*'):
                Agent.get_agents_overview(**kwargs)
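# Hedged sketch of the parametrization that typically drives the test above;
# the tuples are illustrative assumptions about the fixture database (agent
# counts, error code), not the project's real test matrix.
import pytest

@pytest.mark.parametrize('status, older_than, totalItems, exception', [
    ('active', None, 3, None),             # assumed: 3 active agents in the fixture DB
    ('all', '15m', 1, None),               # assumed count for the older_than filter
    ('invalid_status', None, None, 1729),  # assumed error code for a rejected filter
])
def test_status_olderthan_matrix(test_data, status, older_than, totalItems, exception):
    # Delegates to the test body above with each parameter combination.
    test_get_agents_overview_status_olderthan(test_data, status, older_than, totalItems, exception)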
def _get_agent_items(func, offset, limit, select, filters, search, sort, array=False):
    agents, result = Agent.get_agents_overview(select={'fields': ['id']})['items'], []
    limit = int(limit)
    offset = int(offset)
    found_limit = False

    total = 0
    for agent in agents:
        items = func(agent_id=agent['id'], select=select, filters=filters, limit=limit, offset=offset,
                     search=search, sort=sort, nested=False)
        if items == {}:
            continue

        total += 1 if not array else items['totalItems']
        items = [items] if not array else items['items']

        for item in items:
            if limit + 1 <= len(result):
                found_limit = True
                break
            item['agent_id'] = agent['id']
            result.append(item)

    if sort and sort['fields']:
        result = sorted(result, key=itemgetter(sort['fields'][0]),
                        reverse=True if sort['order'] == "desc" else False)

    return {'items': result, 'totalItems': total}
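# Hedged usage sketch for _get_agent_items. 'fake_getter' is a hypothetical
# stand-in for a per-agent collector (e.g. a syscheck or syscollector query
# function); it only illustrates the expected signature and return shape, and
# the call still needs a reachable agents database behind get_agents_overview.
def fake_getter(agent_id, select, filters, limit, offset, search, sort, nested):
    # Returns a single dict per agent (the array=False path).
    return {'file': '/etc/passwd', 'size': 1024}

overview = _get_agent_items(func=fake_getter, offset=0, limit=10, select=None,
                            filters={}, search=None, sort=None, array=False)
# overview == {'items': [{'file': ..., 'size': ..., 'agent_id': ...}, ...],
#              'totalItems': <number of agents that returned data>}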
def get_agents_status(filter_status="all", filter_nodes="all", offset=0, limit=common.database_limit):
    """
    Return the agents overview (id, ip, name, status and node_name per agent),
    optionally filtered by status and node names.
    """
    if not offset:
        offset = 0
    if not filter_status:
        filter_status = "all"
    if not filter_nodes:
        filter_nodes = "all"
    elif filter_nodes != 'all':
        filter_nodes = ast.literal_eval(filter_nodes)
    if not limit:
        limit = common.database_limit

    agents = Agent.get_agents_overview(filters={'status': filter_status, 'node_name': filter_nodes},
                                       select={'fields': ['id', 'ip', 'name', 'status', 'node_name']},
                                       limit=limit, offset=offset)
    return agents
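# Hedged usage sketch for get_agents_status above; the filter values are
# illustrative. Note that filter_nodes arrives as a string and, unless it is
# 'all', is parsed with ast.literal_eval, so a list literal is passed as text.
active_on_workers = get_agents_status(filter_status='active',
                                      filter_nodes="['worker-1', 'worker-2']",
                                      offset=0, limit=500)
print(active_on_workers['totalItems'])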
def _get_agent_items(func, offset, limit, select, filters, search, sort, array=False, query=''):
    agents, result = Agent.get_agents_overview(select={'fields': ['id']})['items'], []

    total = 0
    for agent in agents:
        items = func(agent_id=agent['id'], select=select, filters=filters, limit=limit, offset=offset,
                     search=search, sort=sort, nested=False, q=query)
        if items == {}:
            continue

        total += 1 if not array else items['totalItems']
        items = [items] if not array else items['items']

        for item in items:
            if 0 < limit <= len(result):
                break
            item['agent_id'] = agent['id']
            result.append(item)

    if result:
        if sort and sort['fields']:
            result = sorted(result, key=itemgetter(sort['fields'][0]),
                            reverse=True if sort['order'] == "desc" else False)
        fields_to_nest, non_nested = get_fields_to_nest(result[0].keys(), '_')
    else:
        fields_to_nest, non_nested = None, None

    return {'items': list(map(lambda x: plain_dict_to_nested_dict(x, fields_to_nest, non_nested,
                                                                  WazuhDBQuerySyscollector.nested_fields, '_'),
                              result)),
            'totalItems': total}
def clear(agent_id=None, all_agents=False):
    """
    Clears the syscheck database.

    :param agent_id: For an agent.
    :param all_agents: For all agents.
    :return: Message.
    """
    agents = [agent_id] if not all_agents else map(itemgetter('id'),
                                                   Agent.get_agents_overview(select={'fields': ['id']})['items'])

    wdb_conn = WazuhDBConnection()
    for agent in agents:
        Agent(agent).get_basic_information()  # check if the agent exists
        wdb_conn.execute("agent {} sql delete from fim_entry".format(agent), delete=True)
        # reset the metadata entries that hold fim_db keys back to '000'
        wdb_conn.execute("agent {} sql update metadata set value = '000' where key like 'fim_db%'".format(agent),
                         update=True)
        wdb_conn.execute("agent {} sql update metadata set value = '000' where key = 'syscheck-db-completed'".format(agent),
                         update=True)

    return "Syscheck database deleted"
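# Hedged usage sketch for clear(): either a single agent or every registered
# agent. Both calls assume wazuh-db is reachable through WazuhDBConnection.
clear(agent_id='001')   # wipe the fim_entry table for agent 001
clear(all_agents=True)  # wipe it for every agent in the overview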
def get_health(self, filter_node) -> Dict:
    """
    Return healthcheck data

    :param filter_node: Node to filter by
    :return: Dictionary
    """
    workers_info = {key: val.to_dict() for key, val in self.clients.items()
                    if filter_node is None or filter_node == {} or key in filter_node}
    n_connected_nodes = len(workers_info)
    if filter_node is None or self.configuration['node_name'] in filter_node:
        workers_info.update({self.configuration['node_name']: self.to_dict()})

    # Get active agents by node and format the last keep alive date
    for node_name in workers_info.keys():
        workers_info[node_name]["info"]["n_active_agents"] = \
            Agent.get_agents_overview(filters={'status': 'Active', 'node_name': node_name})['totalItems']
        if workers_info[node_name]['info']['type'] != 'master':
            workers_info[node_name]['status']['last_keep_alive'] = str(
                datetime.fromtimestamp(workers_info[node_name]['status']['last_keep_alive']))

    return {"n_connected_nodes": n_connected_nodes, "nodes": workers_info}
def test_get_agents_overview_sort(test_data, sort, first_id):
    """Test sorting."""
    with patch('sqlite3.connect') as mock_db:
        mock_db.return_value = test_data.global_db
        agents = Agent.get_agents_overview(sort=sort, select={'fields': ['dateAdd']})
        assert agents['items'][0]['id'] == first_id
def test_get_agents_overview_query(test_data, query):
    """Test filtering by query."""
    with patch('sqlite3.connect') as mock_db:
        mock_db.return_value = test_data.global_db
        agents = Agent.get_agents_overview(q=query)
        assert len(agents['items']) == 1
def test_get_agents_overview_select(test_data, select, status, older_than, offset):
    """Test get_agents_overview function with multiple select parameters."""
    with patch('sqlite3.connect') as mock_db:
        mock_db.return_value = test_data.global_db
        agents = Agent.get_agents_overview(select={'fields': select},
                                           filters={'status': status, 'older_than': older_than},
                                           offset=offset)
        assert all(map(lambda x: x.keys() == select, agents['items']))
def test_get_agents_overview_search(test_data, search, totalItems):
    """Test searching by IP and Register IP."""
    with patch('sqlite3.connect') as mock_db:
        mock_db.return_value = test_data.global_db
        agents = Agent.get_agents_overview(search=search)
        assert len(agents['items']) == totalItems
def get_solver_node(input_json, master_name):
    """
    Get the node(s) that can solve a request, i.e. the node(s) that have all
    the necessary information to answer it. Only called when the request type
    is 'master_distributed' and the node_type is master.

    :param input_json: API request parameters and description
    :param master_name: name of the master node
    :return: node name and whether the result is a list or not
    """
    select_node = {'fields': ['node_name']}

    if 'agent_id' in input_json['arguments']:
        # the request is for multiple agents
        if isinstance(input_json['arguments']['agent_id'], list):
            agents = Agent.get_agents_overview(select=select_node, limit=None,
                                               filters={'id': input_json['arguments']['agent_id']},
                                               sort={'fields': ['node_name'], 'order': 'desc'})['items']
            node_name = {k: list(map(itemgetter('id'), g))
                         for k, g in groupby(agents, key=itemgetter('node_name'))}

            # add non-existent ids to the master's dictionary entry
            non_existent_ids = list(set(input_json['arguments']['agent_id']) -
                                    set(map(itemgetter('id'), agents)))
            if non_existent_ids:
                if master_name in node_name:
                    node_name[master_name].extend(non_existent_ids)
                else:
                    node_name[master_name] = non_existent_ids

            return node_name, True
        # if the request is only for one agent
        else:
            # Get the node where the agent 'agent_id' is reporting
            node_name = Agent.get_agent(input_json['arguments']['agent_id'], select=select_node)['node_name']
            return node_name, False

    elif 'node_id' in input_json['arguments']:
        node_id = input_json['arguments']['node_id']
        del input_json['arguments']['node_id']
        return node_id, False

    else:  # agents, syscheck, rootcheck and syscollector
        # API calls that affect all agents. For example, PUT/agents/restart, DELETE/rootcheck, etc...
        agents = Agent.get_agents_overview(select=select_node, limit=None,
                                           sort={'fields': ['node_name'], 'order': 'desc'})['items']
        node_name = {k: [] for k, _ in groupby(agents, key=itemgetter('node_name'))}
        return node_name, True
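# Hedged sketch of the shapes get_solver_node returns; node and agent names
# are illustrative. For a list of agent ids, the result maps node names to the
# ids they host (unknown ids land under the master); for a single id it is
# just that agent's node name.
request = {'arguments': {'agent_id': ['001', '002', '999']}}
nodes, is_list = get_solver_node(request, master_name='master')
# nodes   -> e.g. {'worker-1': ['001', '002'], 'master': ['999']}
# is_list -> True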
def test_get_agents_overview_default(test_data):
    """Test to get all agents using default parameters."""
    with patch('sqlite3.connect') as mock_db:
        mock_db.return_value = test_data.global_db
        agents = Agent.get_agents_overview()

        # check number of agents
        assert agents['totalItems'] == 6
        # check that the returned dictionary has all necessary fields
        for agent in agents['items']:
            # check no values are returned as None
            check_agent(test_data, agent)
def get_healthcheck(self, filter_nodes=None):
    clients_info = {name: {"info": dict(data['info']), "status": data['status']}
                    for name, data in self.get_connected_clients().items()
                    if not filter_nodes or name in filter_nodes}
    n_connected_nodes = len(self.get_connected_clients().items()) + 1  # clients + master

    cluster_config = read_config()
    if not filter_nodes or cluster_config['node_name'] in filter_nodes:
        clients_info.update({cluster_config['node_name']: {"info": {"name": cluster_config['node_name'],
                                                                    "ip": cluster_config['nodes'][0],
                                                                    "version": __version__,
                                                                    "type": "master"}}})

    # Get active agents by node
    for node_name in clients_info.keys():
        clients_info[node_name]["info"]["n_active_agents"] = \
            Agent.get_agents_overview(status='Active', node_name=node_name)['totalItems']

    health_info = {"n_connected_nodes": n_connected_nodes, "nodes": clients_info}
    return health_info
def get_healthcheck(self, filter_nodes=None):
    workers_info = {name: {"info": dict(data['info']), "status": data['status'].copy()}
                    for name, data in self.get_connected_workers().items()
                    if not filter_nodes or name in filter_nodes}
    n_connected_nodes = len(workers_info) + 1  # workers + master

    cluster_config = read_config()
    if not filter_nodes or cluster_config['node_name'] in filter_nodes:
        workers_info.update({cluster_config['node_name']: {"info": {"name": cluster_config['node_name'],
                                                                    "ip": cluster_config['nodes'][0],
                                                                    "version": __version__,
                                                                    "type": "master"}}})

    # Get active agents by node and format the last keep alive date
    for node_name in workers_info.keys():
        workers_info[node_name]["info"]["n_active_agents"] = \
            Agent.get_agents_overview(filters={'status': 'Active', 'node_name': node_name})['totalItems']
        if workers_info[node_name]['info']['type'] != 'master' and \
                isinstance(workers_info[node_name]['status']['last_keep_alive'], float):
            workers_info[node_name]['status']['last_keep_alive'] = str(
                datetime.fromtimestamp(workers_info[node_name]['status']['last_keep_alive']))

    health_info = {"n_connected_nodes": n_connected_nodes, "nodes": workers_info}
    return health_info
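# Small demo of the keep-alive formatting used above: worker nodes report
# last_keep_alive as a POSIX timestamp (float) and the master renders it as a
# human-readable string. The timestamp value here is illustrative.
from datetime import datetime

last_keep_alive = 1546300800.0                       # assumed epoch seconds
print(str(datetime.fromtimestamp(last_keep_alive)))  # e.g. '2019-01-01 00:00:00' (local time)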
def get_agents_status(filter_status="", filter_nodes=""):
    """
    Return a nested list where each element has the following structure
    [agent_id, agent_ip, agent_name, agent_status, node_name]
    """
    agent_list = []
    for agent in Agent.get_agents_overview(select={'fields': ['id', 'ip', 'name', 'status', 'node_name']},
                                           limit=None)['items']:
        if int(agent['id']) == 0:
            continue
        if filter_status and agent['status'] != filter_status:
            continue
        if not agent.get('node_name'):
            agent['node_name'] = "Unknown"
        if filter_nodes and agent['node_name'] not in filter_nodes:
            continue
        agent_list.append([agent['id'], agent['ip'], agent['name'], agent['status'], agent['node_name']])

    return agent_list
def get_agents_status():
    """
    Return a nested list where each element has the following structure
    [agent_id, agent_ip, agent_name, agent_status, node_name]
    """
    agent_list = []
    for agent in Agent.get_agents_overview(select={'fields': ['id', 'ip', 'name', 'status', 'node_name']},
                                           limit=None)['items']:
        if int(agent['id']) == 0:
            continue
        try:
            agent_list.append([agent['id'], agent['ip'], agent['name'], agent['status'], agent['node_name']])
        except KeyError:
            agent_list.append([agent['id'], agent['ip'], agent['name'], agent['status'], "None"])

    return agent_list
def get_agents(filter_status, filter_node, is_master):
    filter_status = ["all"] if not filter_status else filter_status
    filter_node = ["all"] if not filter_node else filter_node

    if is_master:
        return Agent.get_agents_overview(limit=None,
                                         filters={'status': ','.join(filter_status),
                                                  'node_name': ','.join(filter_node)},
                                         select={'fields': ['id', 'ip', 'name', 'status', 'node_name']})
    else:
        input_json = {
            'function': '/agents',
            'from_cluster': False,
            'arguments': {
                'filters': {'status': ','.join(filter_status), 'node_name': ','.join(filter_node)},
                'limit': None,
                'select': {'fields': ['id', 'ip', 'name', 'status', 'node_name']}
            }
        }
        request = "dapi {}".format(json.dumps(input_json))
        response = execute(request)

        if response.get('err'):
            raise Exception(response['err'])
        if response['error'] == 0:
            return response['data']
        else:
            raise WazuhException(response['error'], response['message'])
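# Hedged usage sketch for get_agents(); the filter values are illustrative.
# On a master node the call hits the local framework directly; otherwise it
# is forwarded as a 'dapi' request through execute().
result = get_agents(filter_status=['active'], filter_node=['worker-1'], is_master=True)
for agent in result['items']:
    print(agent['id'], agent['name'], agent['status'])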
def remove_bulk_agents(agent_ids_list: KeysView, logger):
    """
    Removes files created by agents in worker nodes. This function doesn't remove agents from client.keys since the
    client.keys file is overwritten by the master node.

    :param agent_ids_list: List of agents ids to remove.
    :param logger: Logger to use
    :return: None.
    """

    def remove_agent_file_type(agent_files: List[str]):
        """
        Removes files if they exist

        :param agent_files: Path regexes of the files to remove
        :return: None
        """
        for filetype in agent_files:
            filetype_glob = filetype.format(ossec_path=common.ossec_path, id='*', name='*', ip='*')
            filetype_agent = {filetype.format(ossec_path=common.ossec_path, id=a['id'], name=a['name'], ip=a['ip'])
                              for a in agent_info}

            for agent_file in set(glob.iglob(filetype_glob)) & filetype_agent:
                logger.debug2("Removing {}".format(agent_file))
                if os.path.isdir(agent_file):
                    shutil.rmtree(agent_file)
                else:
                    os.remove(agent_file)

    if not agent_ids_list:
        return  # the function doesn't make sense if there are no agents to remove

    logger.info("Removing files from {} agents".format(len(agent_ids_list)))
    logger.debug("Agents to remove: {}".format(', '.join(agent_ids_list)))
    # the agents must be removed in groups of 997: 999 is the limit of SQL variables per query. Limit and offset are
    # always included in the SQL query, so that leaves 997 variables as limit.
    for agents_ids_sublist in itertools.zip_longest(*itertools.repeat(iter(agent_ids_list), 997), fillvalue='0'):
        agents_ids_sublist = list(filter(lambda x: x != '0', agents_ids_sublist))
        # Get info from DB
        agent_info = Agent.get_agents_overview(q=",".join(["id={}".format(i) for i in agents_ids_sublist]),
                                               select={'fields': ['ip', 'id', 'name']}, limit=None)['items']
        logger.debug2("Removing files from agents {}".format(', '.join(agents_ids_sublist)))

        files_to_remove = ['{ossec_path}/queue/agent-info/{name}-{ip}',
                           '{ossec_path}/queue/rootcheck/({name}) {ip}->rootcheck',
                           '{ossec_path}/queue/diff/{name}',
                           '{ossec_path}/queue/agent-groups/{id}',
                           '{ossec_path}/queue/rids/{id}',
                           '{ossec_path}/var/db/agents/{name}-{id}.db']
        remove_agent_file_type(files_to_remove)

        logger.debug2("Removing agent group assignments from database")
        # remove agent from groups
        db_global = glob.glob(common.database_path_global)
        if not db_global:
            raise WazuhException(1600)

        conn = Connection(db_global[0])
        agent_ids_db = {'id_agent{}'.format(i): int(i) for i in agents_ids_sublist}
        conn.execute('delete from belongs where {}'.format(
            ' or '.join(['id_agent = :{}'.format(i) for i in agent_ids_db.keys()])), agent_ids_db)
        conn.commit()

        # Tell wazuh-db to delete agent database
        wdb_conn = WazuhDBConnection()
        wdb_conn.delete_agents_db(agents_ids_sublist)

    logger.info("Agent files removed")
def remove_bulk_agents(agent_ids_list, logger):
    """
    Removes files created by agents in worker nodes. This function doesn't remove agents from client.keys since the
    client.keys file is overwritten by the master node.

    :param agent_ids_list: List of agents ids to remove.
    :return: None.
    """

    def remove_agent_file_type(glob_args, agent_args, agent_files):
        for filetype in agent_files:
            for agent_file in set(glob.iglob(filetype.format(common.ossec_path, *glob_args))) & \
                              {filetype.format(common.ossec_path, *(a[arg] for arg in agent_args))
                               for a in agent_info}:
                logger.debug2("Removing {}".format(agent_file))
                if os.path.isdir(agent_file):
                    shutil.rmtree(agent_file)
                else:
                    os.remove(agent_file)

    if not agent_ids_list:
        return  # the function doesn't make sense if there are no agents to remove

    logger.info("Removing files from {} agents".format(len(agent_ids_list)))
    logger.debug("Agents to remove: {}".format(', '.join(agent_ids_list)))
    # the agents must be removed in groups of 997: 999 is the limit of SQL variables per query. Limit and offset are
    # always included in the SQL query, so that leaves 997 variables as limit.
    for agents_ids_sublist in itertools.zip_longest(*itertools.repeat(iter(agent_ids_list), 997), fillvalue='0'):
        agents_ids_sublist = list(filter(lambda x: x != '0', agents_ids_sublist))
        # Get info from DB
        agent_info = Agent.get_agents_overview(q=",".join(["id={}".format(i) for i in agents_ids_sublist]),
                                               select={'fields': ['ip', 'id', 'name']}, limit=None)['items']
        logger.debug2("Removing files from agents {}".format(', '.join(agents_ids_sublist)))

        # Remove agent files that need agent name and ip
        agent_files = ['{}/queue/agent-info/{}-{}', '{}/queue/rootcheck/({}) {}->rootcheck']
        remove_agent_file_type(('*', '*'), ('name', 'ip'), agent_files)

        # remove agent files that need agent name
        agent_files = ['{}/queue/diff/{}']
        remove_agent_file_type(('*',), ('name',), agent_files)

        # Remove agent files that only need agent id
        agent_files = ['{}/queue/agent-groups/{}', '{}/queue/rids/{}', '{}/queue/db/{}.db',
                       '{}/queue/db/{}.db-wal', '{}/queue/db/{}.db-shm']
        remove_agent_file_type(('*',), ('id',), agent_files)

        # remove agent files that need agent name and id
        agent_files = ['{}/var/db/agents/{}-{}.db']
        remove_agent_file_type(('*', '*'), ('id', 'name'), agent_files)

        # remove agent from groups
        db_global = glob.glob(common.database_path_global)
        if not db_global:
            raise WazuhException(1600)

        conn = Connection(db_global[0])
        agent_ids_db = {'id_agent{}'.format(i): int(i) for i in agents_ids_sublist}
        conn.execute('delete from belongs where {}'.format(
            ' or '.join(['id_agent = :{}'.format(i) for i in agent_ids_db.keys()])), agent_ids_db)
        conn.commit()

    logger.info("Agent files removed")
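# Standalone demo of the 997-id chunking trick used by both versions of
# remove_bulk_agents above: zip_longest over repeated references to a single
# iterator yields fixed-size groups, and the '0' fill values are dropped.
import itertools

agent_ids = [str(i).zfill(3) for i in range(1, 2001)]  # illustrative ids
for sublist in itertools.zip_longest(*itertools.repeat(iter(agent_ids), 997), fillvalue='0'):
    chunk = [i for i in sublist if i != '0']
    print(len(chunk))  # 997, 997, 6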
def test_agent_overview(self):
    agents = Agent.get_agents_overview()
    self.assertGreater(agents["totalItems"], 1)
    self.assertTrue(agents["items"], "No agents: items")
def test_agent_overview(self):
    agents = Agent.get_agents_overview()
    self.assertGreater(agents['totalItems'], 1)
    self.assertTrue(agents['items'], 'No agents: items')
# - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/var/ossec/framework/lib

from sys import path, exit
import json

# cwd = /var/ossec/api/framework/examples
#framework_path = '{0}'.format(path[0][:-9])
# cwd = /var/ossec/api
#framework_path = '{0}/framework'.format(path[0])
# Default path
framework_path = '/var/ossec/api/framework'
path.append(framework_path)

try:
    from wazuh import Wazuh
    from wazuh.agent import Agent
except Exception as e:
    print("No module 'wazuh' found.")
    exit()

if __name__ == "__main__":
    # Creating wazuh object
    # It is possible to specify the ossec path (path argument) or get /etc/ossec-init.conf (get_init argument)
    print("\nWazuh:")
    myWazuh = Wazuh(get_init=True)
    print(myWazuh)

    print("\nAgents:")
    agents = Agent.get_agents_overview()
    print(json.dumps(agents, indent=4, sort_keys=True))
# - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/var/ossec/lib

from sys import path, exit
import json

# cwd = /var/ossec/api/framework/examples
#framework_path = '{0}'.format(path[0][:-9])
# cwd = /var/ossec/api
#framework_path = '{0}/framework'.format(path[0])
# Default path
framework_path = '/var/ossec/api/framework'
path.append(framework_path)

try:
    from wazuh import Wazuh
    from wazuh.agent import Agent
except Exception as e:
    print("No module 'wazuh' found.")
    exit()

if __name__ == "__main__":
    # Creating wazuh object
    # It is possible to specify the ossec path (path argument) or get /etc/ossec-init.conf (get_init argument)
    print("\nWazuh:")
    myWazuh = Wazuh(get_init=True)
    print(myWazuh)

    print("\nAgents:")
    agents = Agent.get_agents_overview(status="all")
    print(json.dumps(agents, indent=4, sort_keys=True))
def _update_worker_files_in_master(self, json_file, zip_dir_path, worker_name, cluster_control_key,
                                   cluster_control_subkey, tag):
    def update_file(n_errors, name, data, file_time=None, content=None, agents=None):
        # Full path
        full_path = common.ossec_path + name
        error_updating_file = False

        # Cluster items information: write mode and umask
        w_mode = cluster_items[data['cluster_item_key']]['write_mode']
        umask = cluster_items[data['cluster_item_key']]['umask']

        if content is None:
            zip_path = "{}/{}".format(zip_dir_path, name)
            with open(zip_path, 'rb') as f:
                content = f.read()

        lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
        lock_file = open(lock_full_path, 'a+')
        try:
            fcntl.lockf(lock_file, fcntl.LOCK_EX)
            _update_file(file_path=name, new_content=content, umask_int=umask, mtime=file_time, w_mode=w_mode,
                         tmp_dir=tmp_path, whoami='master', agents=agents)
        except WazuhException as e:
            logger.debug2("{}: Warning updating file '{}': {}".format(tag, name, e))
            error_tag = 'warnings'
            error_updating_file = True
        except Exception as e:
            logger.debug2("{}: Error updating file '{}': {}".format(tag, name, e))
            error_tag = 'errors'
            error_updating_file = True

        if error_updating_file:
            n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(data['cluster_item_key']) \
                else n_errors[error_tag][data['cluster_item_key']] + 1

        fcntl.lockf(lock_file, fcntl.LOCK_UN)
        lock_file.close()

        return n_errors, error_updating_file

    # tmp path
    tmp_path = "/queue/cluster/{}/tmp_files".format(worker_name)
    cluster_items = get_cluster_items()['files']
    n_merged_files = 0
    n_errors = {'errors': {}, 'warnings': {}}

    # create temporary directory for lock files
    lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
    if not os.path.exists(lock_directory):
        mkdir_with_mode(lock_directory)

    try:
        agents = Agent.get_agents_overview(select={'fields': ['name']}, limit=None)['items']
        agent_names = set(map(itemgetter('name'), agents))
        agent_ids = set(map(itemgetter('id'), agents))
    except Exception as e:
        logger.debug2("{}: Error getting agent ids and names: {}".format(tag, e))
        agent_names, agent_ids = {}, {}

    before = time.time()
    try:
        for filename, data in json_file.items():
            if data['merged']:
                for file_path, file_data, file_time in unmerge_agent_info(data['merge_type'], zip_dir_path,
                                                                          data['merge_name']):
                    n_errors, error_updating_file = update_file(n_errors, file_path, data, file_time, file_data,
                                                                (agent_names, agent_ids))
                    if not error_updating_file:
                        n_merged_files += 1

                if self.stopper.is_set():
                    break
            else:
                n_errors, _ = update_file(n_errors, filename, data)
    except Exception as e:
        logger.error("{}: Error updating worker files: '{}'.".format(tag, e))
        raise e

    after = time.time()
    logger.debug("{0}: Time updating worker files: {1:.2f}s. Total of updated worker files: {2}.".format(
        tag, after - before, n_merged_files))

    if sum(n_errors['errors'].values()) > 0:
        logging.error("{}: Errors updating worker files: {}".format(tag, ' | '.join(
            ['{}: {}'.format(key, value) for key, value in n_errors['errors'].items()])))
    if sum(n_errors['warnings'].values()) > 0:
        for key, value in n_errors['warnings'].items():
            if key == '/queue/agent-info/':
                logger.debug2("Received {} agent statuses for non-existent agents. Skipping.".format(value))
            elif key == '/queue/agent-groups/':
                logger.debug2("Received {} group assignments for non-existent agents. Skipping.".format(value))

    # Save info for healthcheck
    self.manager.set_worker_status(worker_id=self.name, key=cluster_control_key, subkey=cluster_control_subkey,
                                   status=n_merged_files)
def _update_client_files_in_master(self, json_file, files_to_update_json, zip_dir_path, client_name,
                                   cluster_control_key, cluster_control_subkey, tag):
    def update_file(n_errors, name, data, file_time=None, content=None, agents=None):
        # Full path
        full_path = common.ossec_path + name

        # Cluster items information: write mode and umask
        w_mode = cluster_items[data['cluster_item_key']]['write_mode']
        umask = int(cluster_items[data['cluster_item_key']]['umask'], base=0)

        if content is None:
            zip_path = "{}/{}".format(zip_dir_path, name)
            with open(zip_path, 'rb') as f:
                content = f.read()

        lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
        lock_file = open(lock_full_path, 'a+')
        try:
            fcntl.lockf(lock_file, fcntl.LOCK_EX)
            _update_file(file_path=name, new_content=content, umask_int=umask, mtime=file_time, w_mode=w_mode,
                         tmp_dir=tmp_path, whoami='master', agents=agents)
        except Exception as e:
            logger.debug2("{}: Error updating file '{}': {}".format(tag, name, e))
            n_errors[data['cluster_item_key']] = 1 if not n_errors.get(data['cluster_item_key']) \
                else n_errors[data['cluster_item_key']] + 1

        fcntl.lockf(lock_file, fcntl.LOCK_UN)
        lock_file.close()

        return n_errors

    # tmp path
    tmp_path = "/queue/cluster/{}/tmp_files".format(client_name)
    cluster_items = get_cluster_items()['files']
    n_agentsinfo = 0
    n_agentgroups = 0
    n_errors = {}

    # create temporary directory for lock files
    lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
    if not os.path.exists(lock_directory):
        mkdir_with_mode(lock_directory)

    try:
        agents = Agent.get_agents_overview(select={'fields': ['name']}, limit=None)['items']
        agent_names = set(map(itemgetter('name'), agents))
        agent_ids = set(map(itemgetter('id'), agents))
        agents = None
    except Exception as e:
        logger.debug2("{}: Error getting agent ids and names: {}".format(tag, e))
        agent_names, agent_ids = {}, {}

    before = time.time()
    try:
        for filename, data in json_file.items():
            if data['merged']:
                for file_path, file_data, file_time in unmerge_agent_info(data['merge_type'], zip_dir_path,
                                                                          data['merge_name']):
                    n_errors = update_file(n_errors, file_path, data, file_time, file_data,
                                           (agent_names, agent_ids))
                    if data['merge_type'] == 'agent-info':
                        n_agentsinfo += 1
                    else:
                        n_agentgroups += 1

                if self.stopper.is_set():
                    break
            else:
                n_errors = update_file(n_errors, filename, data)
    except Exception as e:
        logger.error("{}: Error updating client files: '{}'.".format(tag, e))
        raise e

    after = time.time()
    logger.debug("{0}: Time updating client files: {1:.2f}s. Agents-info updated total: {2}. "
                 "Agent-groups updated total: {3}.".format(tag, after - before, n_agentsinfo, n_agentgroups))

    if sum(n_errors.values()) > 0:
        logging.error("{}: Errors updating client files: {}".format(tag, ' | '.join(
            ['{}: {}'.format(key, value) for key, value in n_errors.items()])))

    # Save info for healthcheck
    status_number = n_agentsinfo if cluster_control_key == 'last_sync_agentinfo' else n_agentgroups
    self.manager.set_client_status(client_id=self.name, key=cluster_control_key, subkey=cluster_control_subkey,
                                   status=status_number)
async def process_files_from_worker(self, files_checksums: Dict, decompressed_files_path: str, logger):
    """
    Iterates over received files from worker and updates the local ones

    :param files_checksums: A dictionary containing file metadata
    :param decompressed_files_path: Filepath of the decompressed received zipfile
    :param logger: The logger to use
    :return: None
    """
    async def update_file(name: str, data: Dict):
        """
        Updates a file from the worker. It checks the modification date to decide whether to update it or not.
        If it's a merged file, it unmerges it.

        :param name: Filename to update
        :param data: File metadata
        :return: None
        """
        # Full path
        full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

        # Cluster items information: write mode and permissions
        lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
        lock_file = open(lock_full_path, 'a+')
        try:
            fcntl.lockf(lock_file, fcntl.LOCK_EX)
            if os.path.basename(name) == 'client.keys':
                self.logger.warning("Client.keys received in a master node")
                raise WazuhException(3007)

            if data['merged']:
                is_agent_info = data['merge_type'] == 'agent-info'
                if is_agent_info:
                    self.sync_agent_info_status['total_agent_info'] = len(agent_ids)
                else:
                    self.sync_extra_valid_status['total_extra_valid'] = len(agent_ids)

                for file_path, file_data, file_time in cluster.unmerge_agent_info(data['merge_type'],
                                                                                  decompressed_files_path,
                                                                                  data['merge_name']):
                    full_unmerged_name = os.path.join(common.ossec_path, file_path)
                    tmp_unmerged_path = os.path.join(common.ossec_path, 'queue/cluster', self.name,
                                                     os.path.basename(file_path))
                    try:
                        if is_agent_info:
                            agent_name_re = re.match(r'(^.+)-(.+)$', os.path.basename(file_path))
                            agent_name = agent_name_re.group(1) if agent_name_re else os.path.basename(file_path)
                            if agent_name not in agent_names:
                                n_errors['warnings'][data['cluster_item_key']] = 1 \
                                    if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                    else n_errors['warnings'][data['cluster_item_key']] + 1
                                self.logger.debug2("Received status of a non-existent agent '{}'".format(agent_name))
                                continue
                        else:
                            agent_id = os.path.basename(file_path)
                            if agent_id not in agent_ids:
                                n_errors['warnings'][data['cluster_item_key']] = 1 \
                                    if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                    else n_errors['warnings'][data['cluster_item_key']] + 1
                                self.logger.debug2("Received group of a non-existent agent '{}'".format(agent_id))
                                continue

                        try:
                            mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
                        except ValueError:
                            mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

                        if os.path.isfile(full_unmerged_name):
                            local_mtime = datetime.utcfromtimestamp(int(os.stat(full_unmerged_name).st_mtime))
                            # check if the date is older than the manager's date
                            if local_mtime > mtime:
                                logger.debug2("Receiving an old file ({})".format(file_path))
                                continue

                        with open(tmp_unmerged_path, 'wb') as f:
                            f.write(file_data)

                        mtime_epoch = timegm(mtime.timetuple())
                        utils.safe_move(tmp_unmerged_path, full_unmerged_name,
                                        ownership=(common.ossec_uid(), common.ossec_gid()),
                                        permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                                        time=(mtime_epoch, mtime_epoch))
                    except Exception as e:
                        self.logger.error("Error updating agent group/status ({}): {}".format(tmp_unmerged_path, e))
                        if is_agent_info:
                            self.sync_agent_info_status['total_agent_info'] -= 1
                        else:
                            self.sync_extra_valid_status['total_extra_valid'] -= 1

                        n_errors['errors'][data['cluster_item_key']] = 1 \
                            if n_errors['errors'].get(data['cluster_item_key']) is None \
                            else n_errors['errors'][data['cluster_item_key']] + 1

                    await asyncio.sleep(0.0001)
            else:
                zip_path = "{}{}".format(decompressed_files_path, name)
                utils.safe_move(zip_path, full_path,
                                ownership=(common.ossec_uid(), common.ossec_gid()),
                                permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'])
        except WazuhException as e:
            logger.debug2("Warning updating file '{}': {}".format(name, e))
            error_tag = 'warnings'
            error_updating_file = True
        except Exception as e:
            logger.debug2("Error updating file '{}': {}".format(name, e))
            error_tag = 'errors'
            error_updating_file = True

        if error_updating_file:
            n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(data['cluster_item_key']) \
                else n_errors[error_tag][data['cluster_item_key']] + 1

        fcntl.lockf(lock_file, fcntl.LOCK_UN)
        lock_file.close()

    # tmp path
    tmp_path = "/queue/cluster/{}/tmp_files".format(self.name)
    n_merged_files = 0
    n_errors = {'errors': {}, 'warnings': {}}

    # create temporary directory for lock files
    lock_directory = "{}/queue/cluster/lockdir".format(common.ossec_path)
    if not os.path.exists(lock_directory):
        utils.mkdir_with_mode(lock_directory)

    try:
        agents = Agent.get_agents_overview(select={'fields': ['name']}, limit=None)['items']
        agent_names = set(map(operator.itemgetter('name'), agents))
        agent_ids = set(map(operator.itemgetter('id'), agents))
    except Exception as e:
        logger.debug2("Error getting agent ids and names: {}".format(e))
        agent_names, agent_ids = {}, {}

    try:
        for filename, data in files_checksums.items():
            await update_file(data=data, name=filename)

        shutil.rmtree(decompressed_files_path)
    except Exception as e:
        self.logger.error("Error updating worker files: '{}'.".format(e))
        raise e

    if sum(n_errors['errors'].values()) > 0:
        logger.error("Errors updating worker files: {}".format(' | '.join(
            ['{}: {}'.format(key, value) for key, value in n_errors['errors'].items()])))
    if sum(n_errors['warnings'].values()) > 0:
        for key, value in n_errors['warnings'].items():
            if key == '/queue/agent-info/':
                logger.debug2("Received {} agent statuses for non-existent agents. Skipping.".format(value))
            elif key == '/queue/agent-groups/':
                logger.debug2("Received {} group assignments for non-existent agents. Skipping.".format(value))
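# Small demo of the two-step timestamp parse used in process_files_from_worker
# above: merged-file mtimes may or may not carry microseconds, so the %f
# format is tried first and the plain format is the fallback.
from datetime import datetime

def parse_merge_time(file_time):
    try:
        return datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        return datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

print(parse_merge_time('2019-05-01 10:20:30.123456'))
print(parse_merge_time('2019-05-01 10:20:30'))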
import sys
import json

try:
    from wazuh import Wazuh
    from wazuh.agent import Agent
except Exception as e:
    print("No module 'wazuh' found.")
    sys.exit()

if __name__ == "__main__":
    # Creating wazuh object
    # It is possible to specify the ossec path (path argument) or get /etc/ossec-init.conf (get_init argument)
    print("\nWazuh:")
    myWazuh = Wazuh(get_init=True)
    print(myWazuh)

    print("\nAgents:")
    agents = Agent.get_agents_overview(status="all")
    print(json.dumps(agents, indent=4, sort_keys=True))

    print("\nAdding 'WazuhFrameworkTest':")
    agent = Agent()
    agent_id = agent.add("WazuhFrameworkTest", "Any")
    print("\nAgent added with ID: {0}".format(agent_id))
    print("\nAgent key: {0}".format(agent.get_key()))

    agent.get()
    print("\nAgent info:")
    print(json.dumps(agent.to_dict(), indent=4, sort_keys=True))