class TestTemporary:
    """Unit tests for the Temporary factory wrappers."""

    def setup(self):
        self.temporary = Temporary()

    @patch('kiwi.utils.temporary.NamedTemporaryFile')
    def test_new_file(self, mock_named_temporary_file):
        # new_file must delegate to NamedTemporaryFile with the
        # kiwi defaults for directory and file name prefix
        self.temporary.new_file()
        mock_named_temporary_file.assert_called_once_with(
            dir='/var/tmp', prefix='kiwi_'
        )

    @patch('kiwi.utils.temporary.TemporaryDirectory')
    def test_new_dir(self, mock_temporary_directory):
        # new_dir must delegate to TemporaryDirectory with the
        # kiwi defaults for directory and name prefix
        self.temporary.new_dir()
        mock_temporary_directory.assert_called_once_with(
            dir='/var/tmp', prefix='kiwi_'
        )
def create(self, name: str, mbsize: int, type_name: str,
           flags: List[str] = None) -> None:
    """
    Create DASD partition

    Writes an fdasd command sequence into a temporary file and
    pipes it into fdasd operating on self.disk_device.

    :param string name: partition name (used for logging only)
    :param int mbsize: partition size in MB, or the string
        'all_free' to let fdasd use all remaining free space
    :param string type_name: unused
    :param list flags: unused
    """
    self.partition_id += 1
    fdasd_input = Temporary().new_file()
    with open(fdasd_input.name, 'w') as partition:
        log.debug(
            '%s: fdasd: n p cur_position +%sM w q',
            name, format(mbsize)
        )
        if mbsize == 'all_free':
            # empty size field lets fdasd take all remaining space
            partition.write('n\np\n\n\nw\nq\n')
        else:
            partition.write(f'n\np\n\n+{mbsize}M\nw\nq\n')
    bash_command = ' '.join(
        ['cat', fdasd_input.name, '|', 'fdasd', '-f', self.disk_device]
    )
    try:
        Command.run(['bash', '-c', bash_command])
    except Exception:
        # unfortunately fdasd reports that it can't read in the partition
        # table which I consider a bug in fdasd. However the table was
        # correctly created and therefore we continue. Problem is that we
        # are not able to detect real errors with the fdasd operation at
        # that point.
        log.debug('potential fdasd errors were ignored')
def _create_sortfile(self):
    """
    Create isolinux sort file

    Writes a weight entry per boot file; the boot catalog and
    isolinux loader get fixed low weights, files named 'efi'
    get a very high weight and the header_end marker closes
    the list.

    :return: iso sort file name

    :rtype: str
    """
    self.iso_sortfile = Temporary().new_file()
    boot_base = self.source_dir + '/' + self.boot_path
    catalog_file = boot_base + '/boot.catalog'
    loader_file = boot_base + '/loader/isolinux.bin'
    with open(self.iso_sortfile.name, 'w') as sortfile:
        sortfile.write('%s 3\n' % catalog_file)
        sortfile.write('%s 2\n' % loader_file)

        tree_entries = list(os.walk(boot_base))
        tree_entries += list(os.walk(self.source_dir + '/EFI'))
        for directory, subdirs, files in sorted(tree_entries):
            for node in sorted(subdirs + files):
                if node in files and node == 'efi':
                    sortfile.write('%s/%s 1000001\n' % (directory, node))
                else:
                    sortfile.write('%s/%s 1\n' % (directory, node))

        sortfile.write(
            '%s/%s 1000000\n' % (self.source_dir, 'header_end')
        )
    return self.iso_sortfile.name
def sign_verification_metadata(self) -> None:
    """
    Create an openssl based signature from the metadata block
    and attach it at the end of the block. This method requires
    access to a private key for signing. The path to the private
    key is read from the kiwi runtime config file from the
    following section:

    credentials:
      - verification_metadata_signing_key_file: /path/to/pkey

    :raises KiwiCredentialsError: if no signing key file is
        configured in the runtime config
    """
    # no-op when no metadata block has been created yet
    if self.verification_metadata_file:
        runtime_config = RuntimeConfig()
        signing_key_file = runtime_config.\
            get_credentials_verification_metadata_signing_key_file()
        if not signing_key_file:
            raise KiwiCredentialsError(
                '{0} not configured in runtime config'.format(
                    'verification_metadata_signing_key_file'
                )
            )
        signature_file = Temporary().new_file()
        # RSA-PSS signature over a SHA256 digest with MGF1(sha256);
        # saltlen:-1 selects a salt of digest length in openssl
        Command.run([
            'openssl', 'dgst', '-sha256',
            '-sigopt', 'rsa_padding_mode:pss',
            '-sigopt', 'rsa_pss_saltlen:-1',
            '-sigopt', 'rsa_mgf1_md:sha256',
            '-sign', signing_key_file,
            '-out', signature_file.name,
            self.verification_metadata_file.name
        ])
        with open(signature_file.name, 'rb') as sig_fd:
            signature = sig_fd.read()
        # append the raw signature bytes to the metadata block
        with open(self.verification_metadata_file.name, 'ab') as meta:
            meta.write(signature)
def post_init(self, custom_args: List = []) -> None:
    """
    Post initialization method

    Store custom pacman arguments and create runtime configuration
    and environment

    :param list custom_args: pacman arguments
    """
    # Work on a copy of the caller arguments. The default value []
    # is a single shared object across all calls and the remove()
    # below would otherwise mutate it, leaking state between calls.
    self.custom_args = list(custom_args)
    self.check_signatures = False
    self.repo_names: List = []
    # runtime config lives inside the image root so it is usable
    # from chroot calls as well
    self.runtime_pacman_config_file = Temporary(
        dir=self.root_dir
    ).new_file()

    # 'check_signatures' is a kiwi flag, not a pacman argument;
    # strip it before the remaining args are used
    if 'check_signatures' in self.custom_args:
        self.custom_args.remove('check_signatures')
        self.check_signatures = True

    manager_base = self.shared_location + '/pacman'

    self.shared_pacman_dir = {
        'cache-dir': manager_base + '/cache',
        'repos-dir': manager_base + '/repos'
    }

    Path.create(self.shared_pacman_dir['repos-dir'])

    self.pacman_args = [
        '--config', self.runtime_pacman_config_file.name, '--noconfirm'
    ]

    self._write_runtime_config()
def apply_xslt_stylesheets(self, description: str) -> str:
    """
    Apply XSLT style sheet rules to an xml file

    The result of the XSLT processing is stored in a named
    temporary file and returned to the caller

    :param str description: path to an XML description file

    :return: file path name of the processed description

    :rtype: str
    """
    # Parse the provided description, raising the appropriate
    # exception if parsing fails.
    try:
        parsed_description = etree.parse(description)
    except etree.XMLSyntaxError:
        raise KiwiConfigFileFormatNotSupported(
            'Configuration file could not be parsed. '
            'In case your configuration file is XML it most likely '
            'contains a syntax error. For other formats the '
            'Python anymarkup module is required.'
        )
    stylesheet = etree.parse(Defaults.get_xsl_stylesheet_file())
    xslt_transform = etree.XSLT(stylesheet)
    self.description_xslt_processed = Temporary(
        prefix='kiwi_xslt-'
    ).new_file()
    with open(self.description_xslt_processed.name, "wb") as xsltout:
        xsltout.write(
            etree.tostring(xslt_transform(parsed_description))
        )
    return self.description_xslt_processed.name
def create_image_format(self) -> None:
    """
    Create GCE disk format and manifest
    """
    tarball_members = []
    temp_image_dir = Temporary(
        prefix='kiwi_gce_subformat.', dir=self.target_dir
    ).new_dir()
    diskname = ''.join(
        [
            self.target_dir, '/',
            self.xml_state.xml_data.get_name(),
            '.' + self.arch,
            '-' + self.xml_state.get_image_version(),
            '.raw'
        ]
    )
    if self.tag:
        # the license tag is shipped via a json manifest file
        with open(temp_image_dir.name + '/manifest.json', 'w') as manifest:
            manifest.write('{"licenses": ["%s"]}' % self.tag)
        tarball_members.append('manifest.json')

    # GCE expects the disk image under the fixed name disk.raw
    Command.run(['cp', diskname, temp_image_dir.name + '/disk.raw'])
    tarball_members.append('disk.raw')

    archive_name = os.path.basename(
        self.get_target_file_path_for_format(self.image_format)
    )
    # delete the '.gz' suffix from the name. The suffix is appended by
    # the archive creation method depending on the creation type.
    archive_name = archive_name.replace('.gz', '')

    archive = ArchiveTar(
        filename=self.target_dir + '/' + archive_name,
        file_list=tarball_members
    )
    archive.create_gnu_gzip_compressed(temp_image_dir.name)
def uncompress(self, temporary=False):
    """
    Uncompress with format autodetection

    By default the original source file will be changed into
    the uncompressed variant. If temporary is set to True a
    temporary file is created instead

    :param bool temporary: uncompress to a temporary file

    :return: file path name of the uncompressed file

    :rtype: str
    """
    zipper = self.get_format()
    if not zipper:
        raise KiwiCompressionFormatUnknown(
            'could not detect compression format for %s' %
            self.source_filename
        )
    if temporary:
        # stream decompressed data into a temporary file and
        # leave the original compressed file untouched
        self.temp_file = Temporary().new_file()
        shell_line = ' '.join(
            [
                zipper, '-c', '-d', self.source_filename,
                '>', self.temp_file.name
            ]
        )
        Command.run(['bash', '-c', shell_line])
        self.uncompressed_filename = self.temp_file.name
    else:
        # in place decompression replaces the source file
        Command.run([zipper, '-d', self.source_filename])
        self.uncompressed_filename = self.source_filename
    return self.uncompressed_filename
def create_initrd(
    self, mbrid: Optional[SystemIdentifier] = None,
    basename: Optional[str] = None, install_initrd: bool = False
) -> None:
    """
    Create initrd from prepared boot system tree and compress the result

    :param SystemIdentifier mbrid: instance of ImageIdentifier
    :param str basename: base initrd file name
    :param bool install_initrd: installation media initrd
        (not referenced in this method body)
    """
    if self.is_prepared():
        log.info('Creating initrd cpio archive')
        # we can't simply exclude boot when building the archive
        # because the file boot/mbrid must be preserved. Because of
        # that we create a copy of the boot directory and remove
        # everything in boot/ except for boot/mbrid. The original
        # boot directory should not be changed because we rely
        # on other data in boot/ e.g the kernel to be available
        # for the entire image building process
        if basename:
            kiwi_initrd_basename = basename
        else:
            kiwi_initrd_basename = self.initrd_base_name
        temp_boot_root = Temporary(prefix='kiwi_boot_root_copy.').new_dir()
        temp_boot_root_directory = temp_boot_root.name
        # ensure the copied tree root is world readable/executable
        os.chmod(temp_boot_root_directory, 0o755)
        data = DataSync(
            self.boot_root_directory + '/', temp_boot_root_directory
        )
        data.sync_data(options=['-a'])
        boot_directory = temp_boot_root_directory + '/boot'
        Path.wipe(boot_directory)
        if mbrid:
            log.info('--> Importing mbrid: %s', mbrid.get_id())
            Path.create(boot_directory)
            image_identifier = boot_directory + '/mbrid'
            mbrid.write(image_identifier)
        cpio = ArchiveCpio(
            os.sep.join([self.target_dir, kiwi_initrd_basename])
        )
        # the following is a list of directories which were needed
        # during the process of creating an image but not when the
        # image is actually booting with this initrd
        exclude_from_archive = [
            '/' + Defaults.get_shared_cache_location(),
            '/image', '/usr/lib/grub*'
        ]
        # the following is a list of directories to exclude which
        # are not needed inside of the initrd
        exclude_from_archive += [
            '/usr/share/doc', '/usr/share/man', '/home', '/media', '/srv'
        ]
        cpio.create(
            source_dir=temp_boot_root_directory,
            exclude=exclude_from_archive
        )
        log.info('--> xz compressing archive')
        compress = Compress(
            os.sep.join([self.target_dir, kiwi_initrd_basename])
        )
        compress.xz(['--check=crc32', '--lzma2=dict=1MiB', '--threads=0'])
        self.initrd_filename = compress.compressed_filename
def create(self) -> None:
    """
    Create new system root directory

    The method creates a temporary directory and initializes it
    for the purpose of building a system image from it. This
    includes the following setup:

    * create core system paths
    * create static core device nodes

    On success the contents of the temporary location are synced
    to the specified root_dir and the temporary location will be
    deleted. That way we never work on an incomplete initial setup

    :raises KiwiRootInitCreationError: if the init creation
        fails at some point
    """
    staging_root = Temporary(prefix='kiwi_root.').new_dir()
    Path.create(self.root_dir)
    try:
        self._create_base_directories(staging_root.name)
        self._create_base_links(staging_root.name)
        sync = DataSync(staging_root.name + '/', self.root_dir)
        sync.sync_data(options=['-a', '--ignore-existing'])
        if Defaults.is_buildservice_worker():
            copy(
                os.sep + Defaults.get_buildservice_env_name(),
                self.root_dir
            )
    except Exception as issue:
        # partially initialized root is useless; clean up before
        # reporting the failure
        self.delete()
        raise KiwiRootInitCreationError(
            '%s: %s' % (type(issue).__name__, format(issue))
        )
def wipe(self):
    """
    Zap (destroy) any GPT and MBR data structures if present
    For DASD disks create a new VTOC table
    """
    if 'dasd' not in self.table_type:
        log.debug('Initialize %s disk', self.table_type)
        Command.run(
            ['sgdisk', '--zap-all', self.storage_provider.get_device()]
        )
        return
    log.debug('Initialize DASD disk with new VTOC table')
    fdasd_input = Temporary().new_file()
    with open(fdasd_input.name, 'w') as vtoc:
        vtoc.write('y\n\nw\nq\n')
    bash_command = ' '.join(
        [
            'cat', fdasd_input.name, '|',
            'fdasd', '-f', self.storage_provider.get_device()
        ]
    )
    try:
        Command.run(['bash', '-c', bash_command])
    except Exception:
        # unfortunately fdasd reports that it can't read in the
        # partition table which I consider a bug in fdasd. However
        # the table was correctly created and therefore we continue.
        # Problem is that we are not able to detect real errors
        # with the fdasd operation at that point.
        log.debug('potential fdasd errors were ignored')
def setup_mountpoint(self):
    """
    Implements creation of a master directory holding
    the mounts of all volumes
    """
    master_dir = Temporary(prefix='kiwi_volumes.').new_dir()
    self.mountpoint_tempdir = master_dir
    self.mountpoint = master_dir.name
    # track for later cleanup
    self.temp_directories.append(master_dir)
def __init__(self, device, mountpoint=None):
    """
    Bind the given device to a mountpoint. Without a provided
    mountpoint a temporary directory is created and used.
    """
    self.device = device
    if mountpoint:
        self.mountpoint = mountpoint
    else:
        self.mountpoint_tempdir = Temporary(
            prefix='kiwi_mount_manager.'
        ).new_dir()
        self.mountpoint = self.mountpoint_tempdir.name
def __init__(self, device: str, mountpoint: str = ''):
    """
    Bind the given device to a mountpoint. Without a provided
    mountpoint a temporary directory is created; otherwise the
    given path is created if not yet present.
    """
    self.device = device
    if mountpoint:
        Path.create(mountpoint)
        self.mountpoint = mountpoint
    else:
        self.mountpoint_tempdir = Temporary(
            prefix='kiwi_mount_manager.'
        ).new_dir()
        self.mountpoint = self.mountpoint_tempdir.name
def post_init(self, custom_args: List = []) -> None:
    """
    Post initialization method

    Store custom apt-get arguments and create runtime configuration
    and environment

    :param list custom_args: apt-get arguments
    """
    # Work on a copy of the caller arguments. The default value []
    # is a single shared object across all calls and the remove()
    # calls below would otherwise mutate it, leaking state between
    # calls.
    self.custom_args = list(custom_args)
    self.exclude_docs = False
    self.signing_keys: List = []

    # extract custom arguments used for apt config only
    if 'exclude_docs' in self.custom_args:
        self.custom_args.remove('exclude_docs')
        self.exclude_docs = True

    if 'check_signatures' in self.custom_args:
        self.custom_args.remove('check_signatures')
        self.unauthenticated = 'false'
    else:
        self.unauthenticated = 'true'

    self.distribution: str = ''
    self.distribution_path: str = ''
    self.debootstrap_repo_set = False
    self.repo_names: List = []
    self.components: List = []

    # apt-get support is based on creating a sources file which
    # contains path names to the repo and its cache. In order to
    # allow a persistent use of the files in and outside of a chroot
    # call an active bind mount from RootBind::mount_shared_directory
    # is expected and required
    self.manager_base = self.shared_location + '/apt-get'

    self.shared_apt_get_dir = {
        'sources-dir': self.manager_base + '/sources.list.d',
        'preferences-dir': self.manager_base + '/preferences.d'
    }

    self.keyring = '{}/trusted.gpg'.format(self.manager_base)

    # runtime config lives inside the image root so it is usable
    # from chroot calls as well
    self.runtime_apt_get_config_file = Temporary(
        dir=self.root_dir
    ).new_file()

    self.apt_get_args = [
        '-q', '-c', self.runtime_apt_get_config_file.name, '-y'
    ] + self.custom_args

    self.command_env = self._create_apt_get_runtime_environment()

    # config file for apt-get tool
    self.apt_conf = PackageManagerTemplateAptGet()
    self._write_runtime_config()
def unpack(self):
    """
    Unpack current container root data
    """
    root_tempdir = Temporary(prefix='kiwi_oci_root_dir.').new_dir()
    self.oci_root_dir_tempdir = root_tempdir
    self.oci_root_dir = root_tempdir.name
    Command.run(
        [
            'umoci', 'unpack', '--image',
            self.working_image, self.oci_root_dir
        ]
    )
def _create_temporary_metadata_dir(self):
    """
    Create and manage a temporary metadata directory

    :return: the path of the temporary directory just created

    :rtype: str
    """
    tempdir = Temporary(prefix='kiwi_metadata_dir.').new_dir()
    # keep a reference so the directory lives as long as needed
    self.repository_metadata_dirs.append(tempdir)
    return tempdir.name
def _create_extended(self, name: str) -> None:
    """
    Create extended msdos partition

    :param str name: partition name (used for logging only)
    """
    self.partition_id += 1
    fdisk_input = Temporary().new_file()
    with open(fdisk_input.name, 'w') as partition:
        log.debug(
            '%s: fdisk: n e %d cur_position +all_freeM w q',
            name, self.partition_id
        )
        # empty start and size fields select fdisk defaults which
        # means all remaining free space
        partition.write(
            'n\ne\n{0}\n{1}\n{2}\nw\nq\n'.format(
                self.partition_id, '', ''
            )
        )
    self._call_fdisk(fdisk_input.name)
def create(self, filename, base_image=None, ensure_empty_tmpdirs=None):
    """
    Create WSL/Appx archive

    :param string filename: archive file name
    :param string base_image: not-supported
    :param string ensure_empty_tmpdirs: not-supported

    :return: archive file name (the value of the filename parameter)

    :rtype: str
    """
    exclude_list = Defaults.\
        get_exclude_list_for_root_data_sync() + Defaults.\
        get_exclude_list_from_custom_exclude_files(self.root_dir)
    exclude_list.append('boot')
    exclude_list.append('dev')
    exclude_list.append('sys')
    exclude_list.append('proc')

    # The C code of WSL-DistroLauncher hardcodes the name for the
    # root tarball to be install.tar.gz. Thus we have to use this
    # name for the root tarball
    archive_file_name = os.sep.join(
        [self.meta_data_path, 'install.tar']
    )
    archive = ArchiveTar(
        archive_file_name
    )
    archive_file_name = archive.create(
        self.root_dir, exclude=exclude_list
    )
    compressor = Compress(archive_file_name)
    archive_file_name = compressor.gzip()

    # Build the [Files] mapping consumed by the appx tool. Each
    # file below meta_data_path is listed with its absolute path
    # and its path relative to meta_data_path
    filemap_file = Temporary().new_file()
    with open(filemap_file.name, 'w') as filemap:
        filemap.write('[Files]{0}'.format(os.linesep))
        for topdir, dirs, files in sorted(os.walk(self.meta_data_path)):
            for entry in sorted(dirs + files):
                if entry in files:
                    mapfile = os.sep.join([topdir, entry])
                    mapfile_relative = os.path.relpath(
                        mapfile, start=self.meta_data_path
                    )
                    log.info(
                        'Adding {0} to Appx filemap as relative path {1}'.format(
                            mapfile, mapfile_relative
                        )
                    )
                    filemap.write(
                        '"{0}" "{1}"{2}'.format(
                            mapfile, mapfile_relative, os.linesep
                        )
                    )
    Command.run(
        ['appx', '-o', filename, '-f', filemap_file.name]
    )
    return filename
def post_init(self):
    """
    Initializes some umoci parameters and options
    """
    self.oci_dir_tempfile = Temporary(prefix='kiwi_oci_dir.').new_dir()
    self.oci_dir = self.oci_dir_tempfile.name
    self.container_dir = os.sep.join([self.oci_dir, 'oci_layout'])
    self.working_image = '{0}:{1}'.format(
        self.container_dir, Defaults.get_container_base_image_tag()
    )
    # only pass --no-history if the installed umoci supports it
    umoci_knows_no_history = CommandCapabilities.has_option_in_help(
        'umoci', '--no-history', ['config', '--help']
    )
    self.no_history_flag = ['--no-history'] if umoci_knows_no_history else []
def create_image_format(self) -> None:
    """
    Create qcow2 disk format

    Converts the raw disk in two passes; the second pass
    re-converts the intermediate image with compression (-c)
    enabled into the final target file.
    """
    intermediate = Temporary().new_file()
    raw_to_format = [
        'qemu-img', 'convert', '-f', 'raw', self.diskname,
        '-O', self.image_format
    ] + self.options + [intermediate.name]
    Command.run(raw_to_format)
    Command.run(
        [
            'qemu-img', 'convert', '-c',
            '-f', self.image_format, intermediate.name,
            '-O', self.image_format,
            self.get_target_file_path_for_format(self.image_format)
        ]
    )
def _get_pacman_packages(self):
    """
    Download Arch repository listing for the current architecture

    :return: html directory listing, or None if the download
        produced no file

    :rtype: str
    """
    listing_file = Temporary().new_file()
    self.download_from_repository(
        defaults.PLATFORM_MACHINE, listing_file.name
    )
    if os.path.isfile(listing_file.name):
        with open(listing_file.name) as listing:
            return listing.read()
def _create_solvables(self, metadata_dir, tool):
    """
    Create intermediate (before merge) SAT solvables from the data
    given in the metadata_dir and store the result in the temporary
    repository_solvable_dir. The given tool must match the solvable
    data structure. There are the following tools to create a
    solvable from repository metadata:

    * rpmmd2solv: solvable from repodata files
    * susetags2solv: solvable from SUSE (yast2) repository files
    * comps2solv: solvable from RHEL component files
    * rpms2solv: solvable from rpm header files
    * deb2solv: solvable from deb header files

    :param str metadata_dir: path name
    :param str tool: one of the above tools
    """
    # lazily create the shared solvable output directory on first use
    if not self.repository_solvable_dir:
        self.repository_solvable_dir = Temporary(
            prefix='kiwi_solvable_dir.'
        ).new_dir()
    if tool == 'rpms2solv':
        # solvable is created from a bunch of rpm files
        bash_command = [
            tool, os.sep.join([metadata_dir, '*.rpm']),
            '>', self._get_random_solvable_name()
        ]
        Command.run(['bash', '-c', ' '.join(bash_command)])
    else:
        # each file in the metadata_dir is considered a valid
        # solvable for the selected solv tool
        tool_options = []
        if tool == 'deb2solv':
            tool_options.append('-r')
        for source in glob.iglob('/'.join([metadata_dir, '*'])):
            # 'gzip -cd --force' passes both gzip compressed and
            # plain files through to the solv tool
            bash_command = [
                'gzip', '-cd', '--force', source, '|', tool
            ] + tool_options + [
                '>', self._get_random_solvable_name()
            ]
            Command.run(['bash', '-c', ' '.join(bash_command)])
def _get_repomd_xml(self, lookup_path='repodata'):
    """
    Parse repomd.xml file from lookup_path and return an etree
    This method only applies to rpm-md type repositories

    :param str lookup_path: relative path used to find repomd.xml file

    :return: Object with repomd.xml contents

    :rtype: XML etree
    """
    local_copy = Temporary().new_file()
    remote_file = os.sep.join([lookup_path, 'repomd.xml'])
    self.download_from_repository(remote_file, local_copy.name)
    return etree.parse(local_copy.name)
def quote_key_value_file(filename: str) -> List[str]:
    """
    Quote given input file which has to be of the form key=value
    to be able to become sourced by the shell

    :param str filename: file path name

    :return: list of quoted text

    :rtype: List[str]
    """
    # quote a scratch copy so the input file stays untouched
    scratch_file = Temporary().new_file()
    Command.run(['cp', filename, scratch_file.name])
    Shell.run_common_function('baseQuoteFile', [scratch_file.name])
    with open(scratch_file.name) as quoted_file:
        return quoted_file.read().splitlines()
def create(self, filename: str) -> None:
    """
    Create bash quoted profile

    :param str filename: file path name
    """
    settings = self.get_settings()
    # write raw key=value pairs to a scratch file first
    unquoted_file = Temporary().new_file()
    with open(unquoted_file.name, 'w') as profile:
        for key, value in list(settings.items()):
            profile.write('{0}={1}{2}'.format(key, value, os.linesep))
    # shell-quote the scratch file and emit the result
    quoted_lines = Shell.quote_key_value_file(unquoted_file.name)
    with open(filename, 'w') as profile:
        for line in quoted_lines:
            profile.write(line + os.linesep)
            log.debug('--> {0}'.format(line))
def _color_json(self):
    """
    Show data in json output format with nice color highlighting
    """
    out_file = Temporary().new_file()
    # new_file() hands back the NamedTemporaryFile object itself
    # (see the Temporary unit tests); write the serialized data
    # and flush so the external pjson process sees the full content
    out_file.write(json.dumps(self.data, sort_keys=True).encode())
    out_file.flush()
    # NOTE(review): relies on the external 'pjson' tool being
    # installed; os.system failure is silently ignored here
    pjson_cmd = ''.join(
        ['cat ', out_file.name, ' | pjson']
    )
    os.system(pjson_cmd)
def create(self, name, mbsize, type_name, flags=None):
    """
    Create msdos partition

    Writes an fdisk command sequence into a temporary file and
    pipes it into fdisk operating on self.disk_device.

    Note: for any partition after the first one self.start_sector
    is reset to None as a side effect, so fdisk uses its default
    start sector from then on.

    :param string name: partition name (used for logging only)
    :param int mbsize: partition size in MB, or the string
        'all_free' to use all remaining free space
    :param string type_name: partition type
    :param list flags: additional flags
    """
    self.partition_id += 1
    fdisk_input = Temporary().new_file()
    if self.partition_id > 1:
        # Undefined start sector value skips this for fdisk and
        # use its default value
        self.start_sector = None
    with open(fdisk_input.name, 'w') as partition:
        log.debug(
            '%s: fdisk: n p %d cur_position +%sM w q',
            name, self.partition_id, format(mbsize)
        )
        partition.write(
            'n\np\n{0}\n{1}\n{2}\nw\nq\n'.format(
                self.partition_id,
                '' if not self.start_sector else self.start_sector,
                '' if mbsize == 'all_free' else '+{0}M'.format(mbsize)
            )
        )
    bash_command = ' '.join(
        ['cat', fdisk_input.name, '|', 'fdisk', self.disk_device]
    )
    try:
        Command.run(
            ['bash', '-c', bash_command]
        )
    except Exception:
        # unfortunately fdisk reports that it can't read in the partition
        # table which I consider a bug in fdisk. However the table was
        # correctly created and therefore we continue. Problem is that we
        # are not able to detect real errors with the fdisk operation at
        # that point.
        log.debug('potential fdisk errors were ignored')
    self.set_flag(self.partition_id, type_name)
    if flags:
        for flag_name in flags:
            self.set_flag(self.partition_id, flag_name)
def _create_logical(
    self, name: str, mbsize: int, type_name: str,
    flags: List[str] = []
) -> None:
    """
    Create logical msdos partition

    :param str name: partition name (used for logging only)
    :param int mbsize: partition size in MB or 'all_free'
    :param str type_name: partition type, forwarded as flag
    :param list flags: additional flags, forwarded as flags
    """
    self.partition_id += 1
    fdisk_input = Temporary().new_file()
    with open(fdisk_input.name, 'w') as partition:
        log.debug(
            '%s: fdisk: n %d cur_position +%sM w q',
            name, self.partition_id, format(mbsize)
        )
        size_field = '' if mbsize == 'all_free' else '+{0}M'.format(mbsize)
        partition.write(
            'n\n{0}\n{1}\n{2}\nw\nq\n'.format(
                self.partition_id, '', size_field
            )
        )
    self._call_fdisk(fdisk_input.name)
    self._set_all_flags(type_name, flags)
def _sync_system_to_image(
    self, device_map: Dict, system: Any,
    system_boot: Optional[FileSystemBase],
    system_efi: Optional[FileSystemBase],
    system_spare: Optional[FileSystemBase],
    system_custom_parts: List[FileSystemBase]
) -> None:
    """
    Sync the prepared root tree and all extra filesystem
    instances (spare, custom, EFI, boot) into the image

    :param dict device_map: partition role to device provider map;
        must contain a 'readonly' entry when
        root_filesystem_is_overlay is set
    :param Any system: filesystem instance holding the root data
    :param FileSystemBase system_boot: optional boot filesystem
    :param FileSystemBase system_efi: optional EFI filesystem
    :param FileSystemBase system_spare: optional spare filesystem
    :param list system_custom_parts: custom partition filesystems
    """
    log.info('Syncing system to image')
    if system_spare:
        log.info('--> Syncing spare partition data')
        system_spare.sync_data()

    for system_custom_part in system_custom_parts:
        log.info('--> Syncing custom partition(s) data')
        system_custom_part.sync_data()

    if system_efi:
        log.info('--> Syncing EFI boot data to EFI partition')
        system_efi.sync_data()

    if system_boot:
        log.info('--> Syncing boot data at extra partition')
        system_boot.sync_data(
            self._get_exclude_list_for_boot_data_sync()
        )

    log.info('--> Syncing root filesystem data')
    if self.root_filesystem_is_overlay:
        # overlay roots are packed into a squashfs image which is
        # then copied with dd onto the readonly partition device
        squashed_root_file = Temporary().new_file()
        squashed_root = FileSystemSquashFs(
            device_provider=DeviceProvider(), root_dir=self.root_dir,
            custom_args={
                'compression':
                    self.xml_state.build_type.get_squashfscompression()
            }
        )
        squashed_root.create_on_file(
            filename=squashed_root_file.name,
            exclude=self._get_exclude_list_for_root_data_sync(device_map)
        )
        Command.run(
            [
                'dd',
                'if=%s' % squashed_root_file.name,
                'of=%s' % device_map['readonly'].get_device()
            ]
        )
    else:
        system.sync_data(
            self._get_exclude_list_for_root_data_sync(device_map)
        )