def overwrite_or_create_files(filename: str, data: Dict):
    """
    Updates a file coming from the master

    :param filename: Filename to update
    :param data: File metadata such as modification time, whether it's a merged file or not, etc.
    :return: None
    """
    full_filename_path = common.ossec_path + filename

    # Updating client.keys first requires checking which agents were removed.
    if os.path.basename(filename) == 'client.keys':
        self._check_removed_agents("{}{}".format(zip_path, filename), logger)

    if not data['merged']:
        # Plain (non-merged) file: ensure the destination directory exists,
        # then move the received file into place.
        destination_dir = os.path.dirname(full_filename_path)
        if not os.path.exists(destination_dir):
            utils.mkdir_with_mode(destination_dir)
        safe_move("{}{}".format(zip_path, filename), full_filename_path,
                  permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                  ownership=(common.ossec_uid(), common.ossec_gid())
                  )
        return

    # Merged file: worker nodes can only receive agent-groups files.
    # NOTE(review): the key is spelled 'merge-type' here while the master-side
    # counterpart uses 'merge_type' — confirm it matches what the sender emits.
    if data['merge-type'] == 'agent-info':
        logger.warning("Agent status received in a worker node")
        raise WazuhException(3011)

    # Permissions are the same for every unmerged entry; look them up once.
    entry_permissions = self.cluster_items['files'][data['cluster_item_key']]['permissions']
    for name, content, _ in cluster.unmerge_agent_info('agent-groups', zip_path, filename):
        destination = os.path.join(common.ossec_path, name)
        staging = destination + '.tmp'
        # Write to a temporary file first so the final replacement is a single move.
        with open(staging, 'wb') as out_file:
            out_file.write(content)
        safe_move(staging, destination,
                  permissions=entry_permissions,
                  ownership=(common.ossec_uid(), common.ossec_gid())
                  )
def test_remove_manual(grp_mock, pwd_mock, chmod_r_mock, makedirs_mock, safe_move_mock, isdir_mock, isfile_mock,
                       exists_mock, glob_mock, stat_mock, chmod_mock, chown_mock, rmtree_mock, remove_mock, wdb_mock,
                       test_data, backup):
    """
    Test the _remove_manual function

    The mock parameters are presumably injected by stacked @patch decorators
    (in reverse order) plus pytest fixtures/parametrization for `test_data`
    and `backup` — TODO confirm against the decorators above this test.

    :param test_data: fixture exposing an in-memory agents database (global_db)
    :param backup: whether _remove_manual should back up the agent's files
    """
    # Build a fake client.keys file content ("ID NAME IP KEY" per line) from
    # every agent row present in the test database.
    client_keys_text = '\n'.join([f'{str(row["id"]).zfill(3)} {row["name"]} {row["register_ip"]} {row["internal_key"]}'
                                  for row in test_data.global_db.execute(
                                      'select id, name, register_ip, internal_key from agent where id > 0')])

    # Serve the fake client.keys through a patched open() and the test DB
    # through a patched sqlite3.connect, then run the code under test.
    with patch('wazuh.agent.open', mock_open(read_data=client_keys_text)) as m:
        with patch('sqlite3.connect') as mock_db:
            mock_db.return_value = test_data.global_db
            Agent('001')._remove_manual(backup=backup)

            # client.keys must be read, and rewritten through a .tmp file.
            m.assert_any_call(common.client_keys)
            m.assert_any_call(common.client_keys + '.tmp', 'w')
            stat_mock.assert_called_once_with(common.client_keys)
            # The temporary client.keys must get ossec ownership before the swap.
            chown_mock.assert_called_once_with(common.client_keys + '.tmp', common.ossec_uid(), common.ossec_gid())
            # The agent's rids queue entry must be deleted.
            remove_mock.assert_any_call(os.path.join(common.ossec_path, 'queue/rids/001'))

            # make sure the mock is called with a string according to a non-backup path
            exists_mock.assert_any_call('{0}/queue/agent-info/agent-1-any'.format(test_data_path))
            # The rewritten client.keys replaces the original with 0o640 permissions.
            safe_move_mock.assert_called_with(common.client_keys + '.tmp', common.client_keys, permissions=0o640)
            if backup:
                # Backup mode must create the dated backup tree with 0o750 permissions.
                backup_path = os.path.join(common.backup_path, f'agents/1975/Jan/01/001-agent-1-any')
                makedirs_mock.assert_called_once_with(backup_path)
                chmod_r_mock.assert_called_once_with(backup_path, 0o750)
def safe_move(source, target, ownership=(common.ossec_uid(), common.ossec_gid()), time=None, permissions=None):
    """Moves a file even between filesystems

    This function is useful to move files even when target directory is in a different filesystem from the source.
    Write permissions are required on target directory.

    :param source: full path to source file
    :param target: full path to target file
    :param ownership: tuple in the form (user, group) to be set up after the file is moved
    :param time: tuple in the form (addition_timestamp, modified_timestamp)
    :param permissions: numeric mode to apply to the target, e.g. 0o640
        (passed straight to chmod; callers in this project pass int octal literals)
    """
    # Stage the file as a hidden ".<name>.tmp" sibling of the target so the
    # final replacement can be a same-filesystem atomic rename.
    tmp_path, tmp_filename = path.split(target)
    tmp_target = path.join(tmp_path, f".{tmp_filename}.tmp")
    shutil.move(source, tmp_target, copy_function=shutil.copyfile)

    try:
        # Overwrite the file atomically.
        rename(tmp_target, target)
    except OSError:
        # This is the last try when target is still in a different filesystem.
        # For example, when target is a mounted file in a Docker container.
        # However, this is not an atomic operation and could lead to race conditions
        # if the file is read/written simultaneously with other processes.
        try:
            shutil.move(tmp_target, target, copy_function=shutil.copyfile)
        except Exception:
            # Best-effort cleanup: don't leave the hidden temporary file behind
            # when even the fallback move fails.
            from os import remove  # local import: keeps the module's top-level imports untouched
            try:
                remove(tmp_target)
            except OSError:
                pass
            raise

    # Set up metadata on the final file.
    chown(target, *ownership)
    if permissions is not None:
        chmod(target, permissions)
    if time is not None:
        utime(target, time)
def safe_move(source, target, ownership=(common.ossec_uid(), common.ossec_gid()), time=None, permissions=None):
    """Moves a file even between filesystems

    This function is useful to move files even when target directory is in a different filesystem from the source.
    Write permissions are required on target directory.

    :param source: full path to source file
    :param target: full path to target file
    :param ownership: tuple in the form (user, group) to be set up after the file is moved
    :param time: tuple in the form (addition_timestamp, modified_timestamp)
    :param permissions: numeric mode to apply to the target, e.g. 0o640
        (passed straight to chmod; callers in this project pass int octal literals)
    """
    from os import rename  # local import: avoid touching this module's top-level imports

    # Stage the file next to the target so the final replacement can be atomic.
    tmp_target = f"{target}.tmp"
    shutil.move(source, tmp_target, copy_function=shutil.copyfile)

    try:
        # Overwrite the file atomically (same-filesystem fast path).
        rename(tmp_target, target)
    except OSError:
        # Last resort when target is still on a different filesystem, e.g. a
        # mounted file in a Docker container. This path is NOT atomic and could
        # lead to race conditions if the file is read/written concurrently.
        shutil.move(tmp_target, target, copy_function=shutil.copyfile)

    # Set up metadata on the final file.
    chown(target, *ownership)
    if permissions is not None:
        chmod(target, permissions)
    if time is not None:
        utime(target, time)
def doRollover(self):
    """Rotate the log file and fix up permissions and archiving.

    Overrides the base class rollover so that, after rotation, the new log
    file gets the appropriate ossec ownership/permissions and the rotated
    file is gzip-compressed into the /logs/ossec directory.
    """
    # Delegate the actual rotation to the base handler.
    logging.handlers.TimedRotatingFileHandler.doRollover(self)

    # The freshly created log file must belong to ossec and be group-writable only.
    chown(self.baseFilename, common.ossec_uid(), common.ossec_gid())
    chmod(self.baseFilename, 0o660)

    # Locate the file the base class just rotated out
    # (assumes exactly one "<basename>.*" match — TODO confirm).
    plain_rotated = glob.glob("{}.*".format(self.baseFilename))[0]
    archived_path = self.computeArchivesDirectory(plain_rotated)

    # Compress the rotated copy into the archive location, then drop the original.
    with open(plain_rotated, 'rb') as f_in, gzip.open(archived_path, 'wb') as f_out:
        copyfileobj(f_in, f_out)
    chmod(archived_path, 0o640)
    unlink(plain_rotated)
async def update_file(name: str, data: Dict):
    """
    Updates a file from the worker. It checks the modification date to decide whether to update it or not.
    If it's a merged file, it unmerges it.
    :param name: Filename to update
    :param data: File metadata (expects at least the keys 'merged' and
        'cluster_item_key'; when merged, also 'merge_type' and 'merge_name')
    :return: None
    """
    # Full path of the file inside the ossec installation; error bookkeeping.
    # NOTE(review): n_merged_files is initialized but never used in this block.
    full_path, error_updating_file, n_merged_files = common.ossec_path + name, False, 0

    # Serialize concurrent updates of the same file through a per-file lock file.
    lock_full_path = "{}/queue/cluster/lockdir/{}.lock".format(common.ossec_path, os.path.basename(full_path))
    lock_file = open(lock_full_path, 'a+')
    try:
        fcntl.lockf(lock_file, fcntl.LOCK_EX)

        # client.keys must never flow worker -> master.
        if os.path.basename(name) == 'client.keys':
            self.logger.warning("Client.keys received in a master node")
            raise WazuhException(3007)

        if data['merged']:
            # A merged file bundles one entry per agent; unpack it entry by entry.
            is_agent_info = data['merge_type'] == 'agent-info'
            if is_agent_info:
                self.sync_agent_info_status['total_agent_info'] = len(agent_ids)
            else:
                self.sync_extra_valid_status['total_extra_valid'] = len(agent_ids)

            for file_path, file_data, file_time in cluster.unmerge_agent_info(data['merge_type'],
                                                                              decompressed_files_path,
                                                                              data['merge_name']):
                full_unmerged_name = os.path.join(common.ossec_path, file_path)
                # Entries are staged under queue/cluster/<node_name>/ before the final move.
                tmp_unmerged_path = os.path.join(common.ossec_path, 'queue/cluster', self.name,
                                                 os.path.basename(file_path))
                try:
                    if is_agent_info:
                        # agent-info entries are named "<agent_name>-<ip>"; extract the name part.
                        agent_name_re = re.match(r'(^.+)-(.+)$', os.path.basename(file_path))
                        agent_name = agent_name_re.group(1) if agent_name_re else os.path.basename(file_path)
                        if agent_name not in agent_names:
                            # Count a warning for entries referencing unknown agents and skip them.
                            n_errors['warnings'][data['cluster_item_key']] = 1 \
                                if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                else n_errors['warnings'][data['cluster_item_key']] + 1
                            self.logger.debug2("Received status of an non-existent agent '{}'".format(agent_name))
                            continue
                    else:
                        # agent-groups entries are named by agent id.
                        agent_id = os.path.basename(file_path)
                        if agent_id not in agent_ids:
                            n_errors['warnings'][data['cluster_item_key']] = 1 \
                                if n_errors['warnings'].get(data['cluster_item_key']) is None \
                                else n_errors['warnings'][data['cluster_item_key']] + 1
                            self.logger.debug2("Received group of an non-existent agent '{}'".format(agent_id))
                            continue

                    # The merged metadata may or may not carry fractional seconds.
                    try:
                        mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S.%f')
                    except ValueError:
                        mtime = datetime.strptime(file_time, '%Y-%m-%d %H:%M:%S')

                    if os.path.isfile(full_unmerged_name):
                        local_mtime = datetime.utcfromtimestamp(int(os.stat(full_unmerged_name).st_mtime))
                        # check if the date is older than the manager's date
                        if local_mtime > mtime:
                            logger.debug2("Receiving an old file ({})".format(file_path))
                            continue

                    # Stage the entry, then move it into place preserving the worker's mtime.
                    with open(tmp_unmerged_path, 'wb') as f:
                        f.write(file_data)
                    mtime_epoch = timegm(mtime.timetuple())
                    utils.safe_move(tmp_unmerged_path, full_unmerged_name,
                                    ownership=(common.ossec_uid(), common.ossec_gid()),
                                    permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions'],
                                    time=(mtime_epoch, mtime_epoch)
                                    )
                except Exception as e:
                    # A failed entry decrements the pending-sync counter and is tallied as an error.
                    self.logger.error("Error updating agent group/status ({}): {}".format(tmp_unmerged_path, e))
                    if is_agent_info:
                        self.sync_agent_info_status['total_agent_info'] -= 1
                    else:
                        self.sync_extra_valid_status['total_extra_valid'] -= 1
                    n_errors['errors'][data['cluster_item_key']] = 1 \
                        if n_errors['errors'].get(data['cluster_item_key']) is None \
                        else n_errors['errors'][data['cluster_item_key']] + 1
                # Yield to the event loop between entries so a big merged file
                # doesn't starve other coroutines.
                await asyncio.sleep(0.0001)
        else:
            # Non-merged file: move the decompressed copy straight into place.
            zip_path = "{}{}".format(decompressed_files_path, name)
            utils.safe_move(zip_path, full_path,
                            ownership=(common.ossec_uid(), common.ossec_gid()),
                            permissions=self.cluster_items['files'][data['cluster_item_key']]['permissions']
                            )
    except WazuhException as e:
        # Known/controlled failures are tallied as warnings.
        logger.debug2("Warning updating file '{}': {}".format(name, e))
        error_tag = 'warnings'
        error_updating_file = True
    except Exception as e:
        # Anything unexpected is tallied as an error.
        logger.debug2("Error updating file '{}': {}".format(name, e))
        error_tag = 'errors'
        error_updating_file = True

    if error_updating_file:
        n_errors[error_tag][data['cluster_item_key']] = 1 if not n_errors[error_tag].get(data['cluster_item_key']) \
            else n_errors[error_tag][data['cluster_item_key']] + 1

    # NOTE(review): the lock release is not in a finally block — if an exception
    # other than Exception escapes above, the lock file stays locked. Confirm
    # whether this should be wrapped in try/finally.
    fcntl.lockf(lock_file, fcntl.LOCK_UN)
    lock_file.close()
# Entry-point fragment of wazuh_clusterd: argument handling, logging setup and
# cluster configuration validation. NOTE(review): this chunk appears truncated —
# the final `if args.test_config:` branch continues outside this view.
if args.version:
    print_version()
    sys.exit(0)

# Set logger: internal-options debug level takes precedence, falling back to
# the command-line flag; any failure reading the options disables debug.
try:
    debug_mode = configuration.get_internal_options_value('wazuh_clusterd', 'debug', 2, 0) or args.debug_level
except Exception:
    debug_mode = 0

# set correct permissions on cluster.log file (ossec ownership, 0o660)
if os.path.exists('{0}/logs/cluster.log'.format(common.ossec_path)):
    os.chown('{0}/logs/cluster.log'.format(common.ossec_path), common.ossec_uid(), common.ossec_gid())
    os.chmod('{0}/logs/cluster.log'.format(common.ossec_path), 0o660)

main_logger = set_logging(debug_mode)

# Load cluster configuration; a disabled cluster means nothing to do.
cluster_configuration = cluster.read_config(config_file=args.config_file)
if cluster_configuration['disabled']:
    sys.exit(0)
cluster_items = cluster.get_cluster_items()

# Abort on an invalid cluster configuration.
try:
    cluster.check_cluster_config(cluster_configuration)
except Exception as e:
    main_logger.error(e)
    sys.exit(1)

if args.test_config:
def test_ossec_uid():
    """Check that ossec_uid() runs without error when getpwnam resolves to root."""
    root_entry = getpwnam("root")
    with patch('wazuh.common.getpwnam', return_value=root_entry):
        ossec_uid()