def update_node_db_after_sync(data, node, node_name, cluster_socket, my_type):
    """
    Update a node's file statuses in the cluster database after a sync.

    :param data: Sync result with data['files']['updated' | 'error' | 'invalid'] lists.
    :param node: Node URL/identifier used as the key in the database.
    :param node_name: Human-readable node name (used for logging only).
    :param cluster_socket: Open socket to the cluster database.
    :param my_type: This node's type ('client' or 'master').
    """
    logging.info("Updating {}'s ({}) file status in DB".format(node_name, node))

    # Mark successfully synchronized files.
    for updated in divide_list(data['files']['updated']):
        update_sql = "update2"
        for u in updated:
            update_sql += " synchronized {0} /{1}".format(node, u)
        send_to_socket(cluster_socket, update_sql)
        receive_data_from_db_socket(cluster_socket)

    # Mark failed files. Old agent-info files received by a client are deleted
    # from the database instead of being flagged as failed.
    for failed in divide_list(data['files']['error']):
        delete_sql = "delete1"
        update_sql = "update2"
        for f in failed:
            if isinstance(f, dict):
                if f['reason'] == 'Error 3012 - Received an old agent-info file.' and my_type == 'client':
                    delete_sql += " /{0}".format(f['item'])
                else:
                    update_sql += " failed {0} /{1}".format(node, f['item'])
            else:
                update_sql += " failed {0} {1}".format(node, f)
        # Only send commands that actually carry items (previously an empty
        # "update2" was sent when every failed item went to delete_sql).
        if len(update_sql) > len("update2"):
            send_to_socket(cluster_socket, update_sql)
            receive_data_from_db_socket(cluster_socket)
        if len(delete_sql) > len("delete1"):
            send_to_socket(cluster_socket, delete_sql)
            receive_data_from_db_socket(cluster_socket)

    # Mark invalid files.
    for invalid in divide_list(data['files']['invalid']):
        update_sql = "update2"
        for i in invalid:
            update_sql += " invalid {0} {1}".format(node, i)
        send_to_socket(cluster_socket, update_sql)
        receive_data_from_db_socket(cluster_socket)
def update_node_db_after_sync(data, node, cluster_socket):
    """
    Update a node's file statuses in the cluster database after a sync.

    :param data: Sync result with data['files']['updated' | 'error' | 'deleted'] lists.
    :param node: Node identifier used as the key in the database.
    :param cluster_socket: Open socket to the cluster database.
    """
    logging.info("Updating {0}'s file status in DB".format(node))

    # Mark successfully synchronized files.
    for updated in divide_list(data['files']['updated']):
        update_sql = "update2"
        for u in updated:
            update_sql += " synchronized {0} /{1}".format(node, u)
        send_recv_and_check(cluster_socket, update_sql)

    # Mark failed files.
    for failed in divide_list(data['files']['error']):
        update_sql = "update2"
        for f in failed:
            if isinstance(f, dict):
                if f['reason'].startswith('Error 3012'):
                    # Old agent-info files are marked synchronized so the node
                    # stops re-sending them.
                    update_sql += " synchronized {} /{}".format(node, f['item'])
                else:
                    update_sql += " failed {0} /{1}".format(node, f['item'])
            else:
                update_sql += " failed {0} {1}".format(node, f)
        send_recv_and_check(cluster_socket, update_sql)

    # Mark deleted files.
    for deleted in divide_list(data['files']['deleted']):
        update_sql = "update2"
        for d in deleted:
            update_sql += " deleted {0} {1}".format(node, d)
        send_recv_and_check(cluster_socket, update_sql)
def update_node_db_after_sync(data, node, node_name, cluster_socket, my_type):
    """
    Update a node's file statuses in the cluster database after a sync.

    Maps each status key in data['files'] to its database status name and
    persists it; unknown statuses are skipped with a debug message.

    :param data: Sync result dict; data['files'] maps status -> file list.
    :param node: Node URL/identifier used as the key in the database.
    :param node_name: Human-readable node name (used for logging only).
    :param cluster_socket: Open socket to the cluster database.
    :param my_type: This node's type ('client' or 'master').
    """
    logging.info("Updating {}'s ({}) file status in DB".format(node_name, node))
    statuses = {'updated': 'synchronized', 'error': 'failed', 'invalid': 'invalid', 'pending': 'pending'}

    for file_status, file_list in data['files'].items():
        # Translate the sync status to its database name; pass through unknown
        # keys so they are rejected by the validity check below.
        status = statuses.get(file_status, file_status)
        if status not in statuses.values():
            logging.debug("Can't save status {} in database".format(status))
            continue
        for files in divide_list(file_list):
            update_sql = "update2"
            delete_sql = "delete1"
            for f in files:
                if isinstance(f, dict):
                    # Normalize the filename to an absolute-style path.
                    filename = '/' + f['item'] if f['item'][0] != '/' else f['item']
                    if f['reason'] == 'Error 3012 - Received an old agent-info file.' and my_type == 'client':
                        delete_sql += " {}".format(filename)
                    else:
                        update_sql += " {} {} {}".format(status, node, filename)
                else:
                    filename = '/' + f if f[0] != '/' else f
                    update_sql += " {} {} {}".format(status, node, filename)
            # Only send commands that actually carry items (previously an empty
            # "update2" was sent when every item went to delete_sql).
            if len(update_sql) > len("update2"):
                send_to_socket(cluster_socket, update_sql)
                receive_data_from_db_socket(cluster_socket)
            if len(delete_sql) > len("delete1"):
                send_to_socket(cluster_socket, delete_sql)
                receive_data_from_db_socket(cluster_socket)
def clear_file_status():
    """
    Set all database files' status to pending and clean the actual-master table,
    refreshing file metadata (md5/timestamp) from the filesystem.

    :raises WazuhException: 3000 when no cluster configuration is found.
    """
    cluster_socket = connect_to_db_socket(retry=True)

    # Clean last actual master node.
    send_to_socket(cluster_socket, "delactual")
    receive_data_from_db_socket(cluster_socket)

    # Get information of files from filesystem.
    config_cluster = read_config()
    if not config_cluster:
        raise WazuhException(3000, "No config found")

    own_items = list_files_from_filesystem(config_cluster['node_type'], get_cluster_items())

    # Number of files currently stored in the database.
    send_to_socket(cluster_socket, "countfiles")
    n_files_db = int(receive_data_from_db_socket(cluster_socket))

    # Only update status for modified files.
    if n_files_db > 0:
        # Get information of files from DB in pages of 100 rows.
        file_status = ""
        for offset in range(0, n_files_db, 100):
            # BUGFIX: build the query fresh per page — it previously accumulated
            # every prior offset ("selfiles 100 0100200..."), corrupting paging.
            query = "selfiles 100 {}".format(offset)
            send_to_socket(cluster_socket, query)
            file_status += receive_data_from_db_socket(cluster_socket)

        db_items = {filename: {'md5': md5, 'timestamp': timestamp} for filename, md5, timestamp in
                    map(lambda x: x.split('*'), filter(lambda x: x != '', file_status.split(' ')))}

        # Mark files whose checksum or timestamp changed as pending.
        new_items = {}
        for files_slice in divide_list(own_items.items()):
            # BUGFIX: reset the update query per slice — it previously kept
            # growing with every slice's filenames, re-sending earlier ones.
            query = "update1 "
            try:
                local_items = dict(filter(lambda x: db_items[x[0]]['md5'] != x[1]['md5']
                                          or int(db_items[x[0]]['timestamp']) < int(x[1]['timestamp']),
                                          files_slice))
            except KeyError as e:
                # NOTE(review): a missing DB entry skips the whole slice, not
                # just the missing file — confirm this best-effort is intended.
                new_items[e.args[0]] = {'md5': own_items[e.args[0]]['md5'],
                                        'timestamp': own_items[e.args[0]]['timestamp']}
                logging.warning("File not found in database: {0}".format(e.args[0]))
                continue
            query += ' '.join(local_items.keys())
            send_to_socket(cluster_socket, query)
            receive_data_from_db_socket(cluster_socket)
            new_items.update(local_items)
    else:
        new_items = own_items

    update_file_info_bd(cluster_socket, new_items)
    cluster_socket.close()
def get_file_status_of_one_node(node, own_items_names, cluster_socket, my_type, all_items=None):
    """
    Return the status of a node's files according to the cluster database.

    Nodes not yet present in the database are inserted with all their files
    as pending; files present locally but untracked are also added as pending.

    :param node: Tuple (node_url, node_type, _).
    :param own_items_names: Filenames found on the local filesystem.
    :param cluster_socket: Open socket to the cluster database.
    :param my_type: This node's type ('client' or 'master').
    :param all_items: Optional full item list used for non-client nodes.
    :return: Dict of filename -> status.
    """
    node_url, node_type, _ = node
    # Clients only track their own files; other node types may track all items.
    own_items = own_items_names if node_type == 'client' or not all_items else all_items

    # Check files in database.
    send_to_socket(cluster_socket, "count {0}".format(node_url))
    n_files = int(receive_data_from_db_socket(cluster_socket))

    if n_files == 0:
        logging.info("New manager found: {0}".format(node_url))
        logging.debug("Adding {0}'s files to database".format(node_url))
        # If the manager is not in the database, add it with all files.
        for files in divide_list(own_items):
            insert_sql = "insert"
            for filename in files:
                insert_sql += " {0} {1}".format(node_url, filename)
            send_to_socket(cluster_socket, insert_sql)
            receive_data_from_db_socket(cluster_socket)
        all_files = {filename: 'pending' for filename in own_items}
    else:
        logging.debug("Retrieving {0}'s files from database".format(node_url))
        all_files = get_file_status(node_url, cluster_socket)
        # If there are missing files that are not being controlled in the
        # database, add them as pending.
        for missing in divide_list(set(own_items) - set(all_files.keys())):
            insert_sql = "insert"
            for m in missing:
                all_files[m] = 'pending'
                insert_sql += " {0} {1}".format(node_url, m)
            send_to_socket(cluster_socket, insert_sql)
            receive_data_from_db_socket(cluster_socket)

    if my_type == 'master':
        # Non-elected masters only send their agent-infos, not every file
        # they have in the database.
        all_files = dict(filter(lambda x: x[0] in own_items, all_files.items()))

    return all_files
def update_file_info_bd(cluster_socket, files):
    """Persist each file's md5 and timestamp in the database, chunk by chunk."""
    for chunk in divide_list(files.items()):
        parts = ["insertfile "]
        for name, info in chunk:
            parts.append("{} {} {} ".format(name, info['md5'], info['timestamp']))
        send_recv_and_check(cluster_socket, "".join(parts))
def update_file_info_bd(cluster_socket, files):
    """
    Update the files' information (md5 and timestamp) in the database.

    :param cluster_socket: Open socket to the cluster database.
    :param files: Dict of filename -> {'md5': ..., 'timestamp': ...}.
    """
    for chunk in divide_list(files.items()):
        # BUGFIX: the query must be rebuilt for every chunk — initializing it
        # once outside the loop re-sent all previous files with each chunk and
        # defeated the purpose of divide_list().
        query = "insertfile "
        for fname, finfo in chunk:
            query += "{} {} {} ".format(fname, finfo['md5'], finfo['timestamp'])
        send_to_socket(cluster_socket, query)
        receive_data_from_db_socket(cluster_socket)
def clear_file_status_one_node(manager, cluster_socket):
    """Reset the status of every file tracked for *manager* to 'pending'."""
    filenames = get_file_status(manager, cluster_socket).keys()
    for batch in divide_list(filenames):
        command = "update2" + "".join(
            " pending {0} {1}".format(manager, name) for name in batch)
        send_recv_and_check(cluster_socket, command)
def scan_for_new_files_one_node(node, cluster_items, cluster_config, cluster_socket=None, own_items=None):
    """
    Register in the database any local files not yet tracked for *node*.

    A node absent from the database is inserted with all its files; otherwise
    only the untracked files are added as pending.

    :param node: Node URL/identifier.
    :param cluster_items: Cluster items configuration.
    :param cluster_config: Cluster configuration dict (uses 'node_type').
    :param cluster_socket: Open socket to the cluster database.
    :param own_items: Optional pre-computed dict of local files.
    """
    if not own_items:
        own_items = list_files_from_filesystem(cluster_config['node_type'], cluster_items)
    own_items_names = own_items.keys()

    # Check files in database. BUGFIX: read the reply through the shared
    # helper instead of int(filter(..., recv(10000))) — consistent with the
    # rest of the module and, unlike int(filter(...)), valid on Python 3.
    send_to_socket(cluster_socket, "count {0}".format(node))
    n_files = int(receive_data_from_db_socket(cluster_socket))

    if n_files == 0:
        logging.info("New manager found: {0}".format(node))
        logging.debug("Adding {0}'s files to database".format(node))
        # If the manager is not in the database, add it with all files.
        for files in divide_list(own_items_names):
            insert_sql = "insert"
            for filename in files:
                insert_sql += " {0} {1}".format(node, filename)
            send_to_socket(cluster_socket, insert_sql)
            receive_data_from_db_socket(cluster_socket)
    else:
        logging.debug("Retrieving {0}'s files from database".format(node))
        all_files = get_file_status(node, cluster_socket)
        # If there are missing files that are not being controlled in the
        # database, add them as pending.
        for missing in divide_list(set(own_items_names) - set(all_files.keys())):
            insert_sql = "insert"
            for m in missing:
                all_files[m] = 'pending'
                insert_sql += " {0} {1}".format(node, m)
            send_to_socket(cluster_socket, insert_sql)
            receive_data_from_db_socket(cluster_socket)
def scan_for_new_files_one_node(node, cluster_items, cluster_config, my_type, cluster_socket=None, own_items=None, all_items=None):
    """
    Reconcile the database view of a node's files with the local filesystem.

    Unknown nodes are inserted with all files as pending; local files missing
    from the database are added as pending; database rows with no matching
    local file are deleted.

    :param node: Tuple (node_url, node_type, node_name).
    :param cluster_items: Cluster items configuration.
    :param cluster_config: Cluster configuration dict (uses 'node_type').
    :param my_type: This node's type ('master' gets special filtering below).
    :param cluster_socket: Open socket to the cluster database.
    :param own_items: Optional pre-computed dict of local files.
    :param all_items: Optional full item list used for non-client nodes.
    :return: Tuple (all_files, removed): filename -> status dict and whether
             any stale database rows were deleted.
    """
    node_url, node_type, node_name = node
    if not own_items:
        own_items = list_files_from_filesystem(cluster_config['node_type'], cluster_items)
    # Clients only track their own files; other node types may track all_items.
    own_items_names = own_items.keys() if node_type == 'client' or not all_items else all_items
    # check files in database
    count_query = "count {0}".format(node_url)
    send_to_socket(cluster_socket, count_query)
    n_files = int(receive_data_from_db_socket(cluster_socket))
    removed = False
    if n_files == 0:
        logging.debug("Node {} ({}) not found in database.".format(node_name, node_url))
        logging.debug("Adding {}'s ({}) files to database".format(node_name, node_url))
        # if the manager is not in the database, add it with all files
        for files in divide_list(own_items_names):
            insert_sql = "insert"
            for file in files:
                insert_sql += " {0} {1}".format(node_url, file)
            send_to_socket(cluster_socket, insert_sql)
            data = receive_data_from_db_socket(cluster_socket)
        all_files = {file:'pending' for file in own_items_names}
    else:
        logging.info("Retrieving {}'s ({}) files from database".format(node_name, node_url))
        # get_file_status returns records with 'filename'/'status' keys here;
        # flatten them into a filename -> status dict.
        all_files = get_file_status(node_url, cluster_socket)
        all_files = {f['filename']:f['status'] for f in all_files}
        # if there are missing files that are not being controled in database
        # add them as pending
        set_own_items = set(own_items_names)
        set_all_files = set(all_files.keys())
        for missing in divide_list(set_own_items - set_all_files):
            insert_sql = "insert"
            for m in missing:
                all_files[m] = 'pending'
                insert_sql += " {0} {1}".format(node_url,m)
            send_to_socket(cluster_socket, insert_sql)
            data = receive_data_from_db_socket(cluster_socket)
        # remove files that are not present in the filesystem but present in the database
        for missing in divide_list(set_all_files - set_own_items):
            removed = True
            delete_sql = "delete1"
            for m in missing:
                delete_sql += " {}".format(m)
            send_to_socket(cluster_socket, delete_sql)
            data = receive_data_from_db_socket(cluster_socket)
    if my_type == 'master':
        # Deletions performed above are not reported for masters.
        removed = False
        # non elected master only send their agent-infos, and not all files they have
        # on the database
        # NOTE(review): this filters on own_items (the filesystem dict), not on
        # own_items_names, which may be all_items — confirm that is intended.
        all_files = dict(filter(lambda x: x[0] in own_items, all_files.items()))
    return all_files, removed
def scan_for_new_files_one_node(node, cluster_items, cluster_config, cluster_socket=None, own_items=None, remove_rows=False):
    """
    Reconcile the database view of a node's files with the local filesystem.

    Local files missing from the database are added as pending; database rows
    with no matching local file are marked 'tobedeleted'; when *remove_rows*
    is True, rows already marked 'deleted' are purged.

    :param node: Node URL/identifier.
    :param cluster_items: Cluster items configuration.
    :param cluster_config: Cluster configuration dict (uses 'node_type').
    :param cluster_socket: Open socket to the cluster database.
    :param own_items: Optional pre-computed dict of local files.
    :param remove_rows: Whether to purge rows already marked as deleted.
    :return: Tuple (all_files, removed): filename -> status dict and whether
             any file was newly flagged for deletion.
    """
    if not own_items:
        own_items = list_files_from_filesystem(cluster_config['node_type'], cluster_items)
    own_items_names = own_items.keys()

    # Check files in database. BUGFIX: read replies through the shared helper
    # instead of raw recv(10000) + int(filter(...)) — consistent with the rest
    # of the module and valid on Python 3 (where filter() is an iterator).
    send_to_socket(cluster_socket, "count {0}".format(node))
    n_files = int(receive_data_from_db_socket(cluster_socket))

    removed = False
    if n_files == 0:
        logging.info("New manager found: {0}".format(node))
        logging.debug("Adding {0}'s files to database".format(node))
        # If the manager is not in the database, add it with all files.
        for files in divide_list(own_items_names):
            insert_sql = "insert"
            for filename in files:
                insert_sql += " {0} {1}".format(node, filename)
            send_to_socket(cluster_socket, insert_sql)
            receive_data_from_db_socket(cluster_socket)
        all_files = {filename: 'pending' for filename in own_items_names}
    else:
        logging.debug("Retrieving {0}'s files from database".format(node))
        all_files = get_file_status(node, cluster_socket)

        set_own_items = set(own_items_names)
        set_all_files = set(all_files.keys())

        # If there are missing files that are not being controlled in the
        # database, add them as pending.
        for missing in divide_list(set_own_items - set_all_files):
            insert_sql = "insert"
            for m in missing:
                all_files[m] = 'pending'
                insert_sql += " {0} {1}".format(node, m)
            send_to_socket(cluster_socket, insert_sql)
            receive_data_from_db_socket(cluster_socket)

        # Flag database rows with no matching local file as 'tobedeleted',
        # unless they are already flagged or deleted.
        files_in_db = set_all_files - set_own_items
        for missing in divide_list(filter(lambda x: all_files[x] != "tobedeleted" and all_files[x] != 'deleted', files_in_db)):
            removed = True
            delete_sql = "update2"
            for m in missing:
                delete_sql += " tobedeleted {} {}".format(node, m)
            send_to_socket(cluster_socket, delete_sql)
            receive_data_from_db_socket(cluster_socket)

        if remove_rows:
            # Purge rows that were previously marked as deleted.
            for deleted in divide_list(filter(lambda x: all_files[x] == 'deleted', set_all_files)):
                delete_sql = "delete2"
                for d in deleted:
                    delete_sql += " {} {}".format(node, d)
                send_to_socket(cluster_socket, delete_sql)
                receive_data_from_db_socket(cluster_socket)

    return all_files, removed