def setup(self):
    """Install the agent's firewall rules, preferring firewalld when it is active.

    On non-systemd hosts nothing can be done. When firewalld is running,
    permanent firewalld rules are installed and any previously-created custom
    service is removed; otherwise the custom network-setup service is set up.
    """
    if not systemd.is_systemd():
        logger.warn("Did not detect Systemd, unable to set {0}".format(
            self._network_setup_service_name))
        return

    if not self._is_firewall_service_running():
        logger.info(
            "Firewalld service not running/unavailable, trying to set up {0}".format(
                self._network_setup_service_name))
        self._setup_network_setup_service()
        return

    logger.info("Firewalld.service present on the VM, setting up permanent rules on the VM")
    # In case of a failure, this would throw. In such a case, we don't need to try to setup our custom service
    # because on system reboot, all iptable rules are reset by firewalld.service so it would be a no-op.
    self._setup_permanent_firewalld_rules()

    # Remove custom service if exists to avoid problems with firewalld
    stale_service_files = [
        self.get_service_file_path(),
        os.path.join(conf.get_lib_dir(), self.BINARY_FILE_NAME),
    ]
    try:
        fileutil.rm_files(*stale_service_files)
    except Exception as cleanup_error:
        logger.info("Unable to delete existing service {0}: {1}".format(
            self._network_setup_service_name, ustr(cleanup_error)))
def test_remove_files(self):
    """fileutil.rm_files should delete every file matching the given glob patterns."""
    import random
    import string
    import glob

    def random_suffix():
        # Five random uppercase letters/digits — enough to avoid name collisions.
        return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))

    # Create 10 test files spread across two distinct name prefixes.
    prefix_one = os.path.join(self.tmp_dir, self.test_file)
    prefix_two = os.path.join(self.tmp_dir, 'another_file')
    created_paths = [prefix_one + random_suffix() for _ in range(5)]
    created_paths += [prefix_two + random_suffix() for _ in range(5)]
    for created in created_paths:
        open(created, 'a').close()

    # Remove the files via fileutil.rm_files glob patterns and verify none remain.
    pattern_one = prefix_one + '*'
    pattern_two = prefix_two + '*'
    fileutil.rm_files(pattern_one, pattern_two)
    self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, pattern_one))))
    self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, pattern_two))))
def reg_ssh_host_key(self):
    """Regenerate the SSH host key pair when configured to, then return its thumbprint."""
    key_type = conf.get_ssh_host_keypair_type()
    if conf.get_regenerate_ssh_host_key():
        # Drop any stale host keys before generating a fresh pair.
        fileutil.rm_files(conf.get_ssh_key_glob())
        shellutil.run("ssh-keygen -N '' -t {0} -f {1}".format(
            key_type, conf.get_ssh_key_private_path()))
    return self.get_ssh_host_key_thumbprint()
def reg_ssh_host_key(self):
    """Optionally regenerate the SSH host keys, then return the host key thumbprint.

    When host-key regeneration is enabled, all existing host keys under /etc/ssh
    are removed and a new key of the configured type is generated.
    """
    keypair_type = conf.get_ssh_host_keypair_type()
    if conf.get_regenerate_ssh_host_key():
        # Wipe every existing host key so only the freshly generated pair remains.
        fileutil.rm_files("/etc/ssh/ssh_host_*key*")
        shellutil.run(
            "ssh-keygen -N '' -t {0} -f /etc/ssh/ssh_host_{1}_key".format(
                keypair_type, keypair_type))
    return self.get_ssh_host_key_thumbprint(keypair_type)
def del_resolv():
    """Remove the agent's DNS configuration customizations.

    If /etc/resolv.conf does not resolve to resolvconf's managed file, it is a
    plain file and is deleted outright. If resolvconf is managing it, the file
    itself is left intact and only the agent-added drop-in fragments under
    /etc/resolvconf/resolv.conf.d/ are removed.
    """
    if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf':
        logger.info("resolvconf is not configured. Removing /etc/resolv.conf")
        fileutil.rm_files('/etc/resolv.conf')
    else:
        logger.info("resolvconf is enabled; leaving /etc/resolv.conf intact")
        # Bug fix: the second path was previously misspelled 'originial', so the
        # resolvconf 'original' fragment was never actually deleted.
        fileutil.rm_files('/etc/resolvconf/resolv.conf.d/tail',
                          '/etc/resolvconf/resolv.conf.d/original')
def reg_ssh_host_key(self):
    """Regenerate the SSH host key pair if configured, and return its thumbprint."""
    keypair_type = conf.get_ssh_host_keypair_type()
    if conf.get_regenerate_ssh_host_key():
        # Remove existing keys, then generate a new pair of the configured type.
        fileutil.rm_files(conf.get_ssh_key_glob())
        keygen_command = "ssh-keygen -N '' -t {0} -f {1}".format(
            keypair_type, conf.get_ssh_key_private_path())
        shellutil.run(keygen_command)
    return self.get_ssh_host_key_thumbprint()
def remove_extension_cgroups(self, extension_name):
    """Stop the extension's systemd slice unit and delete its unit file.

    For transient units, cgroups are released automatically when the unit
    stops, so stopping them is sufficient; persistent cgroups are released
    once the unit is disabled and its configuration file deleted. This is
    expected to run after the extension has been uninstalled. Today,
    extensions run within transient scopes that clean up after themselves,
    so no unit removal is needed yet; this clean-up matters once each
    extension runs under its own slice.

    Raises CGroupsException if any systemctl/file operation fails.
    """
    slice_unit = self._get_extension_slice_name(extension_name)
    try:
        slice_path = os.path.join(UNIT_FILES_FILE_SYSTEM_PATH, slice_unit)
        shellutil.run_command(["systemctl", "stop", slice_unit])
        fileutil.rm_files(slice_path)
        # Reload systemd so the deleted unit file is forgotten.
        shellutil.run_command(["systemctl", "daemon-reload"])
    except Exception as exc:
        raise CGroupsException("Failed to remove {0}. Error: {1}".format(slice_unit, ustr(exc)))
def reg_ssh_host_key(self):
    """Regenerate SSH host keys when configured, then return the key thumbprint."""
    keypair_type = conf.get_ssh_host_keypair_type()
    if conf.get_regenerate_ssh_host_key():
        # Clear out the existing host keys before regenerating.
        fileutil.rm_files(conf.get_ssh_key_glob())
        if conf.get_ssh_host_keypair_mode() == "auto":
            # The -A option generates all supported key types.
            # This is supported since OpenSSH 5.9 (2011).
            shellutil.run("ssh-keygen -A")
        else:
            shellutil.run("ssh-keygen -N '' -t {0} -f {1}".format(
                keypair_type, conf.get_ssh_key_private_path()))
    return self.get_ssh_host_key_thumbprint()
def reg_ssh_host_key(self):
    """If host-key regeneration is enabled, recreate the SSH host keys; return the thumbprint."""
    key_type = conf.get_ssh_host_keypair_type()
    if conf.get_regenerate_ssh_host_key():
        # Delete the old keys first so only freshly generated keys remain.
        fileutil.rm_files(conf.get_ssh_key_glob())
        if conf.get_ssh_host_keypair_mode() == "auto":
            # -A regenerates every supported key type (available since OpenSSH 5.9, 2011).
            shellutil.run("ssh-keygen -A")
        else:
            command = "ssh-keygen -N '' -t {0} -f {1}".format(
                key_type, conf.get_ssh_key_private_path())
            shellutil.run(command)
    return self.get_ssh_host_key_thumbprint()
def test_remove_files(self):
    """fileutil.rm_files should remove all files matching the supplied glob patterns."""
    def make_suffix():
        # Five random uppercase letters/digits so file names never collide.
        return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))

    # Create 10 test files split across two name prefixes.
    first_prefix = os.path.join(self.tmp_dir, self.test_file)
    second_prefix = os.path.join(self.tmp_dir, 'another_file')
    paths = [first_prefix + make_suffix() for _ in range(5)]
    paths += [second_prefix + make_suffix() for _ in range(5)]
    for path in paths:
        open(path, 'a').close()

    # Remove them with fileutil.rm_files and verify no matches remain.
    first_pattern = first_prefix + '*'
    second_pattern = second_prefix + '*'
    fileutil.rm_files(first_pattern, second_pattern)
    self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, first_pattern))))
    self.assertEqual(0, len(glob.glob(os.path.join(self.tmp_dir, second_pattern))))
def tearDown(self):
    # Clean up the artifacts created during the test: the log-collection
    # directory tree and the compressed archive produced from it, then
    # delegate to the base class for the remaining fixture teardown.
    rm_dirs(self.root_collect_dir)
    rm_files(self.compressed_archive_path)
    AgentTestCase.tearDown(self)
def test_log_collector_should_clean_up_uncollected_truncated_files(self):
    # Make sure that truncated files that are no longer needed are cleaned up. If an existing truncated file
    # from a previous run is not collected in the current run, it should be deleted to free up space.

    # Specify files that have priority. The list is ordered, where the first entry has the highest priority.
    must_collect_files = [os.path.join(self.root_collect_dir, "waagent*")]

    # Set the archive size limit so that not all files can be collected. In that case, files will be added to the
    # archive according to their priority.
    # Set the size limit so that only two files can be collected, of which one needs to be truncated.
    with patch("azurelinuxagent.common.logcollector._UNCOMPRESSED_ARCHIVE_SIZE_LIMIT", 2 * SMALL_FILE_SIZE):
        with patch("azurelinuxagent.common.logcollector._MUST_COLLECT_FILES", must_collect_files):
            with patch("azurelinuxagent.common.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE):
                log_collector = LogCollector()
                archive = log_collector.collect_logs_and_get_archive()

    self._assert_archive_created(archive)

    # First collection: waagent.log fits whole, waagent.log.1 only as a truncated copy.
    expected_files = [
        os.path.join(self.root_collect_dir, "waagent.log"),
        self._truncated_path(
            os.path.join(self.root_collect_dir, "waagent.log.1")),  # this file should be truncated
    ]
    self._assert_files_are_in_archive(expected_files)

    no_files = self._get_number_of_files_in_archive()
    self.assertEqual(2, no_files, "Expected 2 files in archive, found {0}!".format(no_files))

    # Remove the original file so it is not collected anymore. In the next collection, the truncated file should be
    # removed both from the archive and from the filesystem.
    rm_files(os.path.join(self.root_collect_dir, "waagent.log.1"))

    with patch("azurelinuxagent.common.logcollector._UNCOMPRESSED_ARCHIVE_SIZE_LIMIT", 2 * SMALL_FILE_SIZE):
        with patch("azurelinuxagent.common.logcollector._MUST_COLLECT_FILES", must_collect_files):
            with patch("azurelinuxagent.common.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE):
                log_collector = LogCollector()
                second_archive = log_collector.collect_logs_and_get_archive()

    # Second collection: the truncated copy of the deleted waagent.log.1 must be gone,
    # and the freed space allows waagent.log.2.gz to be collected instead.
    expected_files = [
        os.path.join(self.root_collect_dir, "waagent.log"),
        os.path.join(self.root_collect_dir, "waagent.log.2.gz"),
    ]
    unexpected_files = [
        self._truncated_path(os.path.join(self.root_collect_dir, "waagent.log.1"))
    ]
    self._assert_files_are_in_archive(expected_files)
    self._assert_files_are_not_in_archive(unexpected_files)
    self._assert_archive_created(second_archive)

    no_files = self._get_number_of_files_in_archive()
    self.assertEqual(2, no_files, "Expected 2 files in archive, found {0}!".format(no_files))

    # The truncated copy must also have been deleted from the filesystem, not just the archive.
    truncated_files = os.listdir(self.truncated_files_dir)
    self.assertEqual(0, len(truncated_files),
                     "Uncollected truncated file waagent.log.1 should have been deleted!")
def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_deleted(
        self):
    # Ensure the archive reflects the state of files on the disk at collection time. If a file was updated, it
    # needs to be updated in the archive, deleted if removed from disk, and added if not previously seen.
    log_collector = LogCollector()
    first_archive = log_collector.collect_logs_and_get_archive()
    self._assert_archive_created(first_archive)

    # Everything should be in the archive
    expected_files = [
        os.path.join(self.root_collect_dir, "waagent.log"),
        os.path.join(self.root_collect_dir, "waagent.log.1"),
        os.path.join(self.root_collect_dir, "waagent.log.2.gz"),
        os.path.join(self.root_collect_dir, "waagent.log.3.gz"),
        os.path.join(self.root_collect_dir, "less_important_file"),
        os.path.join(self.root_collect_dir, "another_dir", "least_important_file")
    ]
    self._assert_files_are_in_archive(expected_files)

    no_files = self._get_number_of_files_in_archive()
    self.assertEqual(6, no_files, "Expected 6 files in archive, found {0}!".format(no_files))

    # Update a file and its last modified time to ensure the last modified time and last collection time are not
    # the same in this test
    file_to_update = os.path.join(self.root_collect_dir, "waagent.log")
    self._create_file_of_specific_size(file_to_update, LARGE_FILE_SIZE)  # update existing file
    new_time = os.path.getmtime(file_to_update) + 5
    os.utime(file_to_update, (new_time, new_time))

    # Create a new file (that is covered by the manifest and will be collected) and delete a file
    self._create_file_of_specific_size(
        os.path.join(self.root_collect_dir, "less_important_file.1"), LARGE_FILE_SIZE)
    rm_files(os.path.join(self.root_collect_dir, "waagent.log.1"))

    second_archive = log_collector.collect_logs_and_get_archive()
    self._assert_archive_created(second_archive)

    # Second collection: the new file is added, the deleted file is dropped.
    expected_files = [
        os.path.join(self.root_collect_dir, "waagent.log"),
        os.path.join(self.root_collect_dir, "waagent.log.2.gz"),
        os.path.join(self.root_collect_dir, "waagent.log.3.gz"),
        os.path.join(self.root_collect_dir, "less_important_file"),
        os.path.join(self.root_collect_dir, "less_important_file.1"),
        os.path.join(self.root_collect_dir, "another_dir", "least_important_file")
    ]
    unexpected_files = [
        os.path.join(self.root_collect_dir, "waagent.log.1")
    ]
    self._assert_files_are_in_archive(expected_files)
    self._assert_files_are_not_in_archive(unexpected_files)

    # The updated file's archived copy must match its new (larger) size.
    file = os.path.join(self.root_collect_dir, "waagent.log")  # pylint: disable=redefined-builtin
    new_file_size = self._get_uncompressed_file_size(file)
    self.assertEqual(LARGE_FILE_SIZE, new_file_size,
                     "File {0} hasn't been updated! Size in archive is {1}, but "
                     "should be {2}.".format(file, new_file_size, LARGE_FILE_SIZE))

    no_files = self._get_number_of_files_in_archive()
    self.assertEqual(6, no_files, "Expected 6 files in archive, found {0}!".format(no_files))
def test_log_collector_should_prioritize_important_files_if_archive_too_big(
        self):
    # Set the archive size limit so that not all files can be collected. In that case, files will be added to the
    # archive according to their priority.

    # Specify files that have priority. The list is ordered, where the first entry has the highest priority.
    must_collect_files = [
        os.path.join(self.root_collect_dir, "waagent*"),
        os.path.join(self.root_collect_dir, "less_important_file*")
    ]

    with patch("azurelinuxagent.common.logcollector._UNCOMPRESSED_ARCHIVE_SIZE_LIMIT", 10 * 1024 * 1024):
        with patch("azurelinuxagent.common.logcollector._MUST_COLLECT_FILES", must_collect_files):
            log_collector = LogCollector()
            archive = log_collector.collect_logs_and_get_archive()

    self._assert_archive_created(archive)

    # Only the highest-priority waagent files fit within the size limit.
    expected_files = [
        os.path.join(self.root_collect_dir, "waagent.log"),
        os.path.join(self.root_collect_dir, "waagent.log.1"),
        os.path.join(self.root_collect_dir, "waagent.log.2.gz")
    ]
    unexpected_files = [
        os.path.join(self.root_collect_dir, "waagent.log.3.gz"),
        os.path.join(self.root_collect_dir, "less_important_file"),
        os.path.join(self.root_collect_dir, "another_dir", "least_important_file")
    ]
    self._assert_files_are_in_archive(expected_files)
    self._assert_files_are_not_in_archive(unexpected_files)

    no_files = self._get_number_of_files_in_archive()
    self.assertEqual(3, no_files, "Expected 3 files in archive, found {0}!".format(no_files))

    # Second collection, if a file got deleted, delete it from the archive and add next file on the priority list
    # if there is enough space.
    rm_files(os.path.join(self.root_collect_dir, "waagent.log.3.gz"))

    with patch("azurelinuxagent.common.logcollector._UNCOMPRESSED_ARCHIVE_SIZE_LIMIT", 10 * 1024 * 1024):
        with patch("azurelinuxagent.common.logcollector._MUST_COLLECT_FILES", must_collect_files):
            second_archive = log_collector.collect_logs_and_get_archive()

    # The deleted file frees space for the next files on the priority list.
    expected_files = [
        os.path.join(self.root_collect_dir, "waagent.log"),
        os.path.join(self.root_collect_dir, "waagent.log.1"),
        os.path.join(self.root_collect_dir, "waagent.log.2.gz"),
        os.path.join(self.root_collect_dir, "less_important_file"),
        os.path.join(self.root_collect_dir, "another_dir", "least_important_file")
    ]
    unexpected_files = [
        os.path.join(self.root_collect_dir, "waagent.log.3.gz")
    ]
    self._assert_files_are_in_archive(expected_files)
    self._assert_files_are_not_in_archive(unexpected_files)
    self._assert_archive_created(second_archive)

    no_files = self._get_number_of_files_in_archive()
    self.assertEqual(5, no_files, "Expected 5 files in archive, found {0}!".format(no_files))