def test_sources_are_stored(self):
    """A source attached to a word must be retrievable with all fields intact."""
    result = Result()
    source = Source(
        "handle", "what they twote about foo", "twitter.com/linktotweet")
    result.add("foo", source)
    stored = result.get_source_list("foo")
    first = stored[0]
    self.assertEqual(first.handle, "handle")
    self.assertEqual(first.text, "what they twote about foo")
    self.assertEqual(first.link, "twitter.com/linktotweet")
def test_generate_pie_data(self):
    """Pie data lists word/count rows, highest count first."""
    result = Result()
    for word in ("foo", "bar", "foo"):
        result.add(word, None)
    chart = GChart()
    self.assertEqual(
        chart.generate_pie_data(result), ['["foo",2],["bar",1]'])
class ContainerBuilder(object):
    """
    container image builder

    Builds a container archive (<name>.<arch>-<version>.<type>.tar.xz)
    from an already prepared root tree.
    """
    def __init__(self, xml_state, target_dir, root_dir):
        # root tree the container content is taken from
        self.root_dir = root_dir
        # optional container name from the build type <container> setup
        self.requested_container_name = xml_state.build_type.get_container()
        self.requested_container_type = xml_state.get_build_type_name()
        # target archive file name, derived from image name/arch/version
        self.filename = ''.join([
            target_dir, '/', xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(),
            '.', self.requested_container_type, '.tar.xz'
        ])
        # result store collecting produced artifacts
        self.result = Result()

    def create(self):
        """
        Set up the container metadata inside the root tree and pack it
        into the target archive. Returns the Result instance with the
        'container' artifact registered.
        """
        setup_options = {}
        if self.requested_container_name:
            setup_options['container_name'] = self.requested_container_name
        container_setup = ContainerSetup(
            self.requested_container_type, self.root_dir, setup_options)
        log.info('Setting up %s container', self.requested_container_type)
        log.info(
            '--> Container name: %s', container_setup.get_container_name())
        container_setup.setup()
        log.info('--> Creating container archive')
        container_image = ContainerImage(
            self.requested_container_type, self.root_dir)
        container_image.create(self.filename)
        self.result.add('container', self.filename)
        return self.result
def run_load_balancing(self, balance, hostIDs, properties_map):
    """
    Run the requested load balancing plugin in a separate process and
    return a Result with its output.

    balance        -- name of the balancing plugin to run
    hostIDs        -- hosts handed to the plugin
    properties_map -- plugin configuration properties
    """
    result = Result()
    # per-request id so log lines of one request can be correlated
    request_id = str(uuid.uuid1())
    log_adapter = \
        utils.RequestAdapter(self._logger,
                             {'method': 'run_load_balancing',
                              'request_id': request_id})
    log_adapter.info("got request: %s" % balance)
    # unknown balancer: record a plugin error and bail out early
    if balance not in self._balancers:
        warn_message = "Load balance requested but was not found: %s"\
            % balance
        log_adapter.warning(warn_message)
        result.pluginError(balance, warn_message)
        return result
    # run the plugin method in its own process for robustness
    runner = PythonMethodRunner(self._pluginDir,
                                self._class_to_module_map[balance],
                                balance,
                                utils.BALANCE,
                                (hostIDs, properties_map),
                                request_id)
    runner.start()
    log_adapter.debug('Waiting for balance to finish')
    # hard 30 second timeout on the balancing run
    runner.join(30)
    log_adapter.info('returning: %s' % str(runner.getResults()))
    if runner.getResults() is None:
        # no result (timeout/failure): return an empty placeholder pair
        result.add(['', []])
    else:
        result.add(runner.getResults())
    return result
class FileSystemBuilder(object):
    """
    Filesystem image builder

    Creates a filesystem image file from a prepared root tree, either
    via a loop device (block filesystems) or directly on a file
    (e.g. squashfs).
    """
    def __init__(self, xml_state, target_dir, root_dir):
        # optional custom filesystem creation arguments, set by callers
        self.custom_args = None
        # optional filesystem label, set by callers
        self.label = None
        self.root_dir = root_dir
        self.requested_image_type = xml_state.get_build_type_name()
        # for pxe the filesystem comes from the <filesystem> attribute,
        # otherwise the build type name is itself the filesystem name
        if self.requested_image_type == 'pxe':
            self.requested_filesystem = xml_state.build_type.get_filesystem()
        else:
            self.requested_filesystem = self.requested_image_type
        if not self.requested_filesystem:
            raise KiwiFileSystemSetupError(
                'No filesystem configured in %s type' %
                self.requested_image_type)
        # target image file name, derived from image name/arch/version
        self.filename = ''.join([
            target_dir, '/', xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(),
            '.', self.requested_filesystem])
        self.blocksize = xml_state.build_type.get_target_blocksize()
        self.filesystem_setup = FileSystemSetup(xml_state, root_dir)
        # filesystems created directly on a file, without a loop device
        self.filesystems_no_device_node = ['squashfs']
        self.result = Result()

    def create(self):
        """
        Create the filesystem image and register it in the result.
        Raises KiwiFileSystemSetupError for unsupported filesystems.
        """
        log.info('Creating %s filesystem', self.requested_filesystem)
        supported_filesystems = Defaults.get_filesystem_image_types()
        if self.requested_filesystem not in supported_filesystems:
            raise KiwiFileSystemSetupError(
                'Unknown filesystem: %s' % self.requested_filesystem)
        if self.requested_filesystem not in self.filesystems_no_device_node:
            self.__operate_on_loop()
        else:
            self.__operate_on_file()
        self.result.add('filesystem_image', self.filename)
        return self.result

    def __operate_on_loop(self):
        # create the filesystem inside a loop device sized by the
        # calculated image size, then sync the root tree into it
        filesystem = None
        loop_provider = LoopDevice(
            self.filename,
            self.filesystem_setup.get_size_mbytes(),
            self.blocksize)
        loop_provider.create()
        filesystem = FileSystem(
            self.requested_filesystem, loop_provider,
            self.root_dir, self.custom_args)
        filesystem.create_on_device(self.label)
        log.info(
            '--> Syncing data to filesystem on %s',
            loop_provider.get_device())
        # kiwi build metadata must not end up inside the image
        exclude_list = ['image', '.profile', '.kconfig', 'var/cache/kiwi']
        filesystem.sync_data(exclude_list)

    def __operate_on_file(self):
        # filesystems like squashfs are packed directly into a file;
        # DeviceProvider() acts as a no-op device stand-in
        default_provider = DeviceProvider()
        filesystem = FileSystem(
            self.requested_filesystem, default_provider,
            self.root_dir, self.custom_args)
        filesystem.create_on_file(self.filename, self.label)
def test_sources_are_stored(self):
    """Sources added alongside a word come back from get_source_list."""
    r = Result()
    src = Source("handle", "what they twote about foo",
                 "twitter.com/linktotweet")
    r.add("foo", src)
    sources = r.get_source_list("foo")
    self.assertEqual(sources[0].handle, "handle")
    self.assertEqual(sources[0].text, "what they twote about foo")
    self.assertEqual(sources[0].link, "twitter.com/linktotweet")
def collate_words(self, meme, sources):
    """Given a list of Sources and a Meme, populate a Result object with
    matching words"""
    pattern = self._format_regex(meme)
    collated = Result()
    for src in sources:
        hit = re.search(pattern, src.text, re.IGNORECASE)
        if hit is None:
            continue
        # assumption: there was exactly one match
        collated.add(hit.groups()[0].lower(), src)
    return collated
def generateResult(self):
    """Roll every die into a fresh Result, then let each upgrade
    transform that result in order."""
    outcome = Result()
    for die in list(self.dice):
        outcome.add(die, die.chooseSide())
    for upgrade in self.upgrades:
        outcome = upgrade.modifyResult(outcome)
    return outcome
def run_filters(self, filters, hostIDs, vmID, properties_map):
    """
    Run the requested filter plugins, each in its own process, and
    return a Result with the aggregated host list.

    filters        -- names of filter plugins to run
    hostIDs        -- candidate hosts handed to each filter
    vmID           -- the VM the filters decide about
    properties_map -- plugin configuration properties
    """
    result = Result()
    # per-request id so log lines of one request can be correlated
    request_id = str(uuid.uuid1())
    log_adapter = \
        utils.RequestAdapter(self._logger,
                             {'method': 'run_filters',
                              'request_id': request_id})
    # run each filter in a process for robustness
    log_adapter.info("got request: %s" % str(filters))
    # split requested filters into known and unknown ones
    avail_f, missing_f = utils.partition(filters, lambda f: f in self._filters)
    # handle missing filters
    for f in missing_f:
        log_adapter.warning("Filter requested but was not found: %s" % f)
        result.pluginError(f, "plugin not found: '%s'" % f)
    # Prepare a generator "list" of runners
    filterRunners = [
        PythonMethodRunner(
            self._pluginDir,
            self._class_to_module_map[f],
            f,
            utils.FILTER,
            (hostIDs, vmID, properties_map),
            request_id)
        for f in avail_f
    ]
    for runner in filterRunners:
        runner.start()
    log_adapter.debug("Waiting for filters to finish")
    # TODO add timeout config
    if utils.waitOnGroup(filterRunners):
        log_adapter.warning("Waiting on filters timed out")
    log_adapter.debug("Aggregating results")
    filters_results = self.aggregate_filter_results(filterRunners, request_id)
    if filters_results is None:
        # every filter failed: fall back to the unfiltered host list
        log_adapter.info('All filters failed, return the full list')
        result.error("all filters failed")
        filters_results = hostIDs
    result.add(filters_results)
    log_adapter.info('returning: %s' % str(filters_results))
    return result
def run_cost_functions(self, cost_functions, hostIDs, vmID, properties_map):
    """
    Run the requested score (cost) function plugins, each in its own
    process, and return a Result with the weighted aggregated scores.

    cost_functions -- iterable of (name, weight) pairs
    hostIDs        -- candidate hosts handed to each score function
    vmID           -- the VM being scored for
    properties_map -- plugin configuration properties
    """
    result = Result()
    # per-request id so log lines of one request can be correlated
    request_id = str(uuid.uuid1())
    log_adapter = \
        utils.RequestAdapter(self._logger,
                             {'method': 'run_cost_functions',
                              'request_id': request_id})
    # run each filter in a process for robustness
    log_adapter.info("got request: %s" % str(cost_functions))
    # Get the list of known and unknown score functions
    # NOTE: tuple-unpacking lambda below is Python-2-only syntax (PEP 3113)
    available_cost_f, missing_cost_f = \
        utils.partition(cost_functions, lambda (n, w): n in self._scores)
    # Report the unknown functions
    for name, weight in missing_cost_f:
        log_adapter.warning("requested but was not found: %s" % name)
        result.pluginError(name, "plugin not found: '%s'" % name)
    # Prepare a generator "list" with runners and weights
    scoreRunners = [
        (PythonMethodRunner(
            self._pluginDir,
            self._class_to_module_map[name],
            name,
            utils.SCORE,
            (hostIDs, vmID, properties_map),
            request_id), weight)
        for name, weight in available_cost_f
    ]
    for runner, _weight in scoreRunners:
        runner.start()
    log_adapter.debug("Waiting for scoring to finish")
    if utils.waitOnGroup([runner for runner, _weight in scoreRunners]):
        log_adapter.warning("Waiting on score functions timed out")
        result.error("Waiting on score functions timed out")
    log_adapter.debug("Aggregating results")
    results = self.aggregate_score_results(scoreRunners, request_id)
    result.add(results)
    log_adapter.info('returning: %s' % str(results))
    return result
class ArchiveBuilder(object):
    """
    root archive image builder

    Packs the prepared root tree into a compressed tar archive plus an
    md5 checksum file.
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.root_dir = root_dir
        self.target_dir = target_dir
        self.xml_state = xml_state
        self.requested_archive_type = xml_state.get_build_type_name()
        self.result = Result()
        # NOTE(review): the 'tbz' type produces an xz-compressed tar
        # named *.tar.xz — naming kept for compatibility, verify intent
        self.filename = self.__target_file_for('tar.xz')
        self.checksum = self.__target_file_for('md5')

    def create(self):
        """
        Create the archive for the requested type and register the
        archive and its checksum in the result. Raises
        KiwiArchiveSetupError for unsupported archive types.
        """
        supported_archives = Defaults.get_archive_image_types()
        if self.requested_archive_type not in supported_archives:
            raise KiwiArchiveSetupError(
                'Unknown archive type: %s' % self.requested_archive_type)
        if self.requested_archive_type == 'tbz':
            log.info('Creating XZ compressed tar archive')
            archive = ArchiveTar(self.__target_file_for('tar'))
            archive.create_xz_compressed(self.root_dir)
            checksum = Checksum(self.filename)
            log.info('--> Creating archive checksum')
            checksum.md5(self.checksum)
            self.result.add('root_archive', self.filename)
            self.result.add('root_archive_checksum', self.checksum)
        return self.result

    def __target_file_for(self, suffix):
        # build <target>/<name>.<arch>-<version>.<suffix>
        return ''.join([
            self.target_dir, '/', self.xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + self.xml_state.get_image_version(), '.', suffix
        ])
def run_filters(self, filters, hostIDs, vmID, properties_map):
    """
    Run the requested filter plugins, each in its own process, and
    return a Result with the aggregated host list.

    filters        -- names of filter plugins to run
    hostIDs        -- candidate hosts handed to each filter
    vmID           -- the VM the filters decide about
    properties_map -- plugin configuration properties
    """
    result = Result()
    # per-request id so log lines of one request can be correlated
    request_id = str(uuid.uuid1())
    log_adapter = \
        utils.RequestAdapter(self._logger,
                             {'method': 'run_filters',
                              'request_id': request_id})
    # run each filter in a process for robustness
    log_adapter.info("got request: %s" % str(filters))
    # split requested filters into known and unknown ones
    avail_f, missing_f = utils.partition(filters, lambda f: f in self._filters)
    # handle missing filters
    for f in missing_f:
        log_adapter.warning("Filter requested but was not found: %s" % f)
        result.pluginError(f, "plugin not found: '%s'" % f)
    # Prepare a generator "list" of runners
    filterRunners = [
        PythonMethodRunner(self._pluginDir,
                           self._class_to_module_map[f],
                           f,
                           utils.FILTER,
                           (hostIDs, vmID, properties_map),
                           request_id)
        for f in avail_f
    ]
    for runner in filterRunners:
        runner.start()
    log_adapter.debug("Waiting for filters to finish")
    # TODO add timeout config
    if utils.waitOnGroup(filterRunners):
        log_adapter.warning("Waiting on filters timed out")
    log_adapter.debug("Aggregating results")
    filters_results = self.aggregate_filter_results(
        filterRunners, request_id)
    if filters_results is None:
        # every filter failed: fall back to the unfiltered host list
        log_adapter.info('All filters failed, return the full list')
        result.error("all filters failed")
        filters_results = hostIDs
    result.add(filters_results)
    log_adapter.info('returning: %s' % str(filters_results))
    return result
class ContainerBuilder(object):
    """
    container image builder

    Builds a container archive (<name>.<arch>-<version>.<type>.tar.xz)
    from an already prepared root tree.
    """
    def __init__(self, xml_state, target_dir, root_dir):
        # root tree the container content is taken from
        self.root_dir = root_dir
        # optional container name from the build type <container> setup
        self.requested_container_name = xml_state.build_type.get_container()
        self.requested_container_type = xml_state.get_build_type_name()
        # target archive file name, derived from image name/arch/version
        self.filename = ''.join(
            [
                target_dir, '/',
                xml_state.xml_data.get_name(),
                '.' + platform.machine(),
                '-' + xml_state.get_image_version(),
                '.', self.requested_container_type, '.tar.xz'
            ]
        )
        # result store collecting produced artifacts
        self.result = Result()

    def create(self):
        """
        Set up the container metadata inside the root tree and pack it
        into the target archive. Returns the Result instance with the
        'container' artifact registered.
        """
        setup_options = {}
        if self.requested_container_name:
            setup_options['container_name'] = self.requested_container_name
        container_setup = ContainerSetup(
            self.requested_container_type, self.root_dir, setup_options
        )
        log.info('Setting up %s container', self.requested_container_type)
        log.info(
            '--> Container name: %s', container_setup.get_container_name()
        )
        container_setup.setup()
        log.info(
            '--> Creating container archive'
        )
        container_image = ContainerImage(
            self.requested_container_type, self.root_dir
        )
        container_image.create(
            self.filename
        )
        self.result.add(
            'container', self.filename
        )
        return self.result
def search(self, query, sleep_time=1, sleep_each=5, *args, **kwargs):
    """
    Run one or more queries against the corpus and collect a Result
    per query.

    query: str or iterable of str: the queries to run
    sleep_time: int: sleeping time in seconds
    sleep_each: int: sleep after each `sleep_each` request
    for more arguments see `params_container.Container`

    Returns self.results (list of Result objects); queries that yield
    no documents are also recorded in self.unsuccessful.

    __________
    pbar bad behaviour if found < numResults
    pbar dies if interrupted
    verbose might be more stable
    we want to add param "progress=['bar', 'verbose']", dont we
    """
    if sleep_each < 1:
        raise ValueError('Argument `sleep_each` must be >= 1')
    # allow a bare string as a single-query convenience
    if isinstance(query, str):
        query = [query]
    if not isinstance(query, Iterable):
        raise TypeError(self.__type_except % type(query))
    for q in query:
        _r = Result(q)
        parser = self.__corpus.PageParser(query=q, *args, **kwargs)
        q_desc = self.__pbar_desc % q
        # use .get() so a missing `numResults` gives an open-ended
        # progress bar (total=None) instead of raising KeyError
        for t in tqdm(parser.extract(),
                      total=kwargs.get('numResults'),
                      unit='docs',
                      desc=q_desc):
            _r.add(t)
            # throttle: pause after every `sleep_each` collected docs
            if _r.N % sleep_each == 0:
                sleep(sleep_time)
        self.results.append(_r)
        if _r.N == 0:
            warnings.warn(self.__warn % q)
            self.unsuccessful.append(q)
    return self.results
def run_cost_functions(self, cost_functions, hostIDs, vmID, properties_map):
    """
    Run the requested score (cost) function plugins, each in its own
    process, and return a Result with the weighted aggregated scores.

    cost_functions -- iterable of (name, weight) pairs
    hostIDs        -- candidate hosts handed to each score function
    vmID           -- the VM being scored for
    properties_map -- plugin configuration properties
    """
    result = Result()
    # per-request id so log lines of one request can be correlated
    request_id = str(uuid.uuid1())
    log_adapter = \
        utils.RequestAdapter(self._logger,
                             {'method': 'run_cost_functions',
                              'request_id': request_id})
    # run each filter in a process for robustness
    log_adapter.info("got request: %s" % str(cost_functions))
    # Get the list of known and unknown score functions
    # NOTE: tuple-unpacking lambda below is Python-2-only syntax (PEP 3113)
    available_cost_f, missing_cost_f = \
        utils.partition(cost_functions, lambda (n, w): n in self._scores)
    # Report the unknown functions
    for name, weight in missing_cost_f:
        log_adapter.warning("requested but was not found: %s" % name)
        result.pluginError(name, "plugin not found: '%s'" % name)
    # Prepare a generator "list" with runners and weights
    scoreRunners = [(PythonMethodRunner(self._pluginDir,
                                        self._class_to_module_map[name],
                                        name,
                                        utils.SCORE,
                                        (hostIDs, vmID, properties_map),
                                        request_id), weight)
                    for name, weight in available_cost_f]
    for runner, _weight in scoreRunners:
        runner.start()
    log_adapter.debug("Waiting for scoring to finish")
    if utils.waitOnGroup([runner for runner, _weight in scoreRunners]):
        log_adapter.warning("Waiting on score functions timed out")
        result.error("Waiting on score functions timed out")
    log_adapter.debug("Aggregating results")
    results = self.aggregate_score_results(scoreRunners, request_id)
    result.add(results)
    log_adapter.info('returning: %s' % str(results))
    return result
def test_list_is_sorted(self):
    """get_list() returns (word, count) pairs in descending count order."""
    result = Result()
    for word in ["baz", "bar", "foo", "foo", "baz", "baz"]:
        result.add(word, None)
    self.assertEqual(
        result.get_list(), [("baz", 3), ("foo", 2), ("bar", 1)])
class ArchiveBuilder(object):
    """
    root archive image builder

    Packs the prepared root tree into a compressed tar archive plus an
    md5 checksum file.
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.root_dir = root_dir
        self.target_dir = target_dir
        self.xml_state = xml_state
        self.requested_archive_type = xml_state.get_build_type_name()
        self.result = Result()
        # NOTE(review): the 'tbz' type produces an xz-compressed tar
        # named *.tar.xz — naming kept for compatibility, verify intent
        self.filename = self.__target_file_for('tar.xz')
        self.checksum = self.__target_file_for('md5')

    def create(self):
        """
        Create the archive for the requested type and register the
        archive and its checksum in the result. Raises
        KiwiArchiveSetupError for unsupported archive types.
        """
        supported_archives = Defaults.get_archive_image_types()
        if self.requested_archive_type not in supported_archives:
            raise KiwiArchiveSetupError(
                'Unknown archive type: %s' % self.requested_archive_type)
        if self.requested_archive_type == 'tbz':
            log.info('Creating XZ compressed tar archive')
            archive = ArchiveTar(self.__target_file_for('tar'))
            archive.create_xz_compressed(self.root_dir)
            checksum = Checksum(self.filename)
            log.info('--> Creating archive checksum')
            checksum.md5(self.checksum)
            self.result.add('root_archive', self.filename)
            self.result.add('root_archive_checksum', self.checksum)
        return self.result

    def __target_file_for(self, suffix):
        # build <target>/<name>.<arch>-<version>.<suffix>
        return ''.join([
            self.target_dir, '/', self.xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + self.xml_state.get_image_version(), '.', suffix
        ])
class FileSystemBuilder(object):
    """
    Filesystem image builder

    Creates a filesystem image file from a prepared root tree, either
    via a loop device (block filesystems) or directly on a file
    (e.g. squashfs).
    """
    def __init__(self, xml_state, target_dir, root_dir):
        # optional custom filesystem creation arguments, set by callers
        self.custom_args = None
        # optional filesystem label, set by callers
        self.label = None
        self.root_dir = root_dir
        self.requested_image_type = xml_state.get_build_type_name()
        # for pxe the filesystem comes from the <filesystem> attribute,
        # otherwise the build type name is itself the filesystem name
        if self.requested_image_type == 'pxe':
            self.requested_filesystem = xml_state.build_type.get_filesystem()
        else:
            self.requested_filesystem = self.requested_image_type
        if not self.requested_filesystem:
            raise KiwiFileSystemSetupError(
                'No filesystem configured in %s type' %
                self.requested_image_type
            )
        # target image file name, derived from image name/arch/version
        self.filename = ''.join(
            [
                target_dir, '/',
                xml_state.xml_data.get_name(),
                '.' + platform.machine(),
                '-' + xml_state.get_image_version(),
                '.', self.requested_filesystem
            ]
        )
        self.blocksize = xml_state.build_type.get_target_blocksize()
        self.filesystem_setup = FileSystemSetup(xml_state, root_dir)
        # filesystems created directly on a file, without a loop device
        self.filesystems_no_device_node = [
            'squashfs'
        ]
        self.result = Result()

    def create(self):
        """
        Create the filesystem image and register it in the result.
        Raises KiwiFileSystemSetupError for unsupported filesystems.
        """
        log.info(
            'Creating %s filesystem', self.requested_filesystem
        )
        supported_filesystems = Defaults.get_filesystem_image_types()
        if self.requested_filesystem not in supported_filesystems:
            raise KiwiFileSystemSetupError(
                'Unknown filesystem: %s' % self.requested_filesystem
            )
        if self.requested_filesystem not in self.filesystems_no_device_node:
            self.__operate_on_loop()
        else:
            self.__operate_on_file()
        self.result.add(
            'filesystem_image', self.filename
        )
        return self.result

    def __operate_on_loop(self):
        # create the filesystem inside a loop device sized by the
        # calculated image size, then sync the root tree into it
        filesystem = None
        loop_provider = LoopDevice(
            self.filename,
            self.filesystem_setup.get_size_mbytes(),
            self.blocksize
        )
        loop_provider.create()
        filesystem = FileSystem(
            self.requested_filesystem, loop_provider,
            self.root_dir, self.custom_args
        )
        filesystem.create_on_device(self.label)
        log.info(
            '--> Syncing data to filesystem on %s',
            loop_provider.get_device()
        )
        # kiwi build metadata must not end up inside the image
        exclude_list = [
            'image', '.profile', '.kconfig', 'var/cache/kiwi'
        ]
        filesystem.sync_data(exclude_list)

    def __operate_on_file(self):
        # filesystems like squashfs are packed directly into a file;
        # DeviceProvider() acts as a no-op device stand-in
        default_provider = DeviceProvider()
        filesystem = FileSystem(
            self.requested_filesystem, default_provider,
            self.root_dir, self.custom_args
        )
        filesystem.create_on_file(
            self.filename, self.label
        )
def test_can_add_duplicate_words(self):
    """A word added twice collapses to one entry; total count is still 2."""
    result = Result()
    for _ in range(2):
        result.add("foo", None)
    self.assertEqual(result.get_list(), [("foo", 2)])
    self.assertEqual(result.count, 2)
class PxeBuilder(object):
    """
    Filesystem based PXE image builder. This results in creating
    a boot image(initrd) plus its appropriate kernel files and the
    root filesystem image with a checksum. The result can be used
    within the kiwi PXE boot infrastructure
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.target_dir = target_dir
        # whether the root filesystem image should be xz compressed
        self.compressed = xml_state.build_type.get_compressed()
        self.image_name = xml_state.xml_data.get_name()
        self.machine = xml_state.get_build_type_machine_section()
        self.pxedeploy = xml_state.get_build_type_pxedeploy_section()
        # the root filesystem image is delegated to FileSystemBuilder
        self.filesystem = FileSystemBuilder(
            xml_state, target_dir, root_dir
        )
        self.system_setup = SystemSetup(
            xml_state=xml_state,
            description_dir=None,
            root_dir=root_dir
        )
        self.boot_image_task = BootImageTask(
            'kiwi', xml_state, target_dir
        )
        # filled in by create() once kernel/hypervisor are extracted
        self.kernel_filename = None
        self.hypervisor_filename = None
        self.result = Result()

    def create(self):
        """
        Build all PXE artifacts: root filesystem image (optionally
        compressed) with md5 checksum, boot initrd, kernel and — for
        Xen dom0 setups — the hypervisor. Returns the Result instance.
        """
        log.info('Creating PXE root filesystem image')
        self.filesystem.create()
        self.image = self.filesystem.filename
        if self.compressed:
            log.info('xz compressing root filesystem image')
            compress = Compress(self.image)
            compress.xz()
            self.image = compress.compressed_filename
        log.info('Creating PXE root filesystem MD5 checksum')
        self.filesystem_checksum = self.filesystem.filename + '.md5'
        checksum = Checksum(self.image)
        checksum.md5(self.filesystem_checksum)
        # prepare boot(initrd) root system
        log.info('Creating PXE boot image')
        self.boot_image_task.prepare()
        # export modprobe configuration to boot image
        self.system_setup.export_modprobe_setup(
            self.boot_image_task.boot_root_directory
        )
        # extract kernel from boot(initrd) root system
        kernel = Kernel(self.boot_image_task.boot_root_directory)
        kernel_data = kernel.get_kernel()
        if kernel_data:
            self.kernel_filename = ''.join(
                [self.image_name, '-', kernel_data.version, '.kernel']
            )
            kernel.copy_kernel(
                self.target_dir, self.kernel_filename
            )
        else:
            raise KiwiPxeBootImageError(
                'No kernel in boot image tree %s found' %
                self.boot_image_task.boot_root_directory
            )
        # extract hypervisor from boot(initrd) root system
        if self.machine and self.machine.get_domain() == 'dom0':
            kernel_data = kernel.get_xen_hypervisor()
            if kernel_data:
                self.hypervisor_filename = ''.join(
                    [self.image_name, '-', kernel_data.name]
                )
                kernel.copy_xen_hypervisor(
                    self.target_dir, self.hypervisor_filename
                )
                self.result.add(
                    'xen_hypervisor', self.hypervisor_filename
                )
            else:
                raise KiwiPxeBootImageError(
                    'No hypervisor in boot image tree %s found' %
                    self.boot_image_task.boot_root_directory
                )
        # create initrd for pxe boot
        self.boot_image_task.create_initrd()
        self.result.add(
            'kernel', self.kernel_filename
        )
        self.result.add(
            'initrd', self.boot_image_task.initrd_filename
        )
        self.result.add(
            'filesystem_image', self.image
        )
        self.result.add(
            'filesystem_md5', self.filesystem_checksum
        )
        if self.pxedeploy:
            log.warning(
                'Creation of client config file from pxedeploy not implemented'
            )
        return self.result
class DiskBuilder(object):
    """
    Disk image builder

    Creates a raw bootable disk image from a prepared root tree:
    partitioning, optional mdraid/LUKS/LVM layers, filesystems,
    bootloader setup and optional install media / disk formats.
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.root_dir = root_dir
        self.target_dir = target_dir
        self.xml_state = xml_state
        # optional custom filesystem creation arguments, set by callers
        self.custom_filesystem_args = None
        self.build_type_name = xml_state.get_build_type_name()
        self.image_format = xml_state.build_type.get_format()
        self.install_iso = xml_state.build_type.get_installiso()
        self.install_stick = xml_state.build_type.get_installstick()
        self.install_pxe = xml_state.build_type.get_installpxe()
        self.blocksize = xml_state.build_type.get_target_blocksize()
        self.volume_manager_name = xml_state.get_volume_management()
        self.volumes = xml_state.get_volumes()
        self.volume_group_name = xml_state.get_volume_group_name()
        self.mdraid = xml_state.build_type.get_mdraid()
        self.luks = xml_state.build_type.get_luks()
        self.luks_os = xml_state.build_type.get_luksOS()
        self.machine = xml_state.get_build_type_machine_section()
        self.requested_filesystem = xml_state.build_type.get_filesystem()
        self.requested_boot_filesystem = \
            xml_state.build_type.get_bootfilesystem()
        self.bootloader = xml_state.build_type.get_bootloader()
        self.disk_setup = DiskSetup(
            xml_state, root_dir
        )
        self.boot_image_task = BootImageTask(
            'kiwi', xml_state, target_dir
        )
        self.firmware = FirmWare(
            xml_state
        )
        self.system_setup = SystemSetup(
            xml_state=xml_state,
            description_dir=None,
            root_dir=self.root_dir
        )
        # target raw disk file name, derived from image name/arch/version
        self.diskname = ''.join(
            [
                target_dir, '/',
                xml_state.xml_data.get_name(),
                '.' + platform.machine(),
                '-' + xml_state.get_image_version(),
                '.raw'
            ]
        )
        self.install_media = self.__install_image_requested()
        self.install_image = InstallImageBuilder(
            xml_state, target_dir, self.boot_image_task
        )
        # an instance of a class with the sync_data capability
        # representing the entire image system except for the boot/ area
        # which could live on another part of the disk
        self.system = None
        # an instance of a class with the sync_data capability
        # representing the boot/ area of the disk if not part of
        # self.system
        self.system_boot = None
        # an instance of a class with the sync_data capability
        # representing the boot/efi area of the disk
        self.system_efi = None
        # result store
        self.result = Result()

    def create(self):
        """
        Build the raw disk image and any requested install media or
        disk format conversions. Returns the Result instance.
        """
        # install media is only defined for the oem build type
        if self.install_media and self.build_type_name != 'oem':
            raise KiwiInstallMediaError(
                'Install media requires oem type setup, got %s' %
                self.build_type_name
            )
        # setup recovery archive, cleanup and create archive if requested
        self.system_setup.create_recovery_archive()
        # prepare boot(initrd) root system
        log.info('Preparing boot system')
        self.boot_image_task.prepare()
        # precalculate needed disk size
        disksize_mbytes = self.disk_setup.get_disksize_mbytes()
        # create the disk
        log.info('Creating raw disk image %s', self.diskname)
        loop_provider = LoopDevice(
            self.diskname, disksize_mbytes, self.blocksize
        )
        loop_provider.create()
        self.disk = Disk(
            self.firmware.get_partition_table_type(), loop_provider
        )
        # create the bootloader instance
        self.bootloader_config = BootLoaderConfig(
            self.bootloader, self.xml_state, self.root_dir,
            {'targetbase': loop_provider.get_device()}
        )
        # create disk partitions and instance device map
        device_map = self.__build_and_map_disk_partitions()
        # create raid on current root device if requested
        if self.mdraid:
            self.raid_root = RaidDevice(device_map['root'])
            self.raid_root.create_degraded_raid(raid_level=self.mdraid)
            device_map['root'] = self.raid_root.get_device()
        # create luks on current root device if requested
        if self.luks:
            self.luks_root = LuksDevice(device_map['root'])
            self.luks_root.create_crypto_luks(
                passphrase=self.luks, os=self.luks_os
            )
            device_map['root'] = self.luks_root.get_device()
        # create filesystems on boot partition(s) if any
        self.__build_boot_filesystems(device_map)
        # create volumes and filesystems for root system
        if self.volume_manager_name:
            volume_manager_custom_parameters = {
                'root_filesystem_args': self.custom_filesystem_args,
                'root_label': self.disk_setup.get_root_label(),
                'root_is_snapshot':
                    self.xml_state.build_type.get_btrfs_root_is_snapshot()
            }
            volume_manager = VolumeManager(
                self.volume_manager_name, device_map['root'],
                self.root_dir + '/', self.volumes,
                volume_manager_custom_parameters
            )
            volume_manager.setup(
                self.volume_group_name
            )
            volume_manager.create_volumes(
                self.requested_filesystem
            )
            volume_manager.mount_volumes()
            self.system = volume_manager
            device_map['root'] = volume_manager.get_device()['root']
        else:
            log.info(
                'Creating root(%s) filesystem on %s',
                self.requested_filesystem,
                device_map['root'].get_device()
            )
            filesystem = FileSystem(
                self.requested_filesystem, device_map['root'],
                self.root_dir + '/',
                self.custom_filesystem_args
            )
            filesystem.create_on_device(
                label=self.disk_setup.get_root_label()
            )
            self.system = filesystem
        # create a random image identifier
        self.mbrid = ImageIdentifier()
        self.mbrid.calculate_id()
        # create first stage metadata to boot image
        self.__write_partition_id_config_to_boot_image()
        self.__write_recovery_metadata_to_boot_image()
        self.__write_raid_config_to_boot_image()
        self.system_setup.export_modprobe_setup(
            self.boot_image_task.boot_root_directory
        )
        # create first stage metadata to system image
        self.__write_image_identifier_to_system_image()
        self.__write_crypttab_to_system_image()
        # create initrd cpio archive
        self.boot_image_task.create_initrd(self.mbrid)
        # create second stage metadata to boot
        self.__copy_first_boot_files_to_system_image()
        self.__write_bootloader_config_to_system_image(device_map)
        self.mbrid.write_to_disk(
            self.disk.storage_provider
        )
        # syncing system data to disk image
        log.info('Syncing system to image')
        if self.system_efi:
            log.info('--> Syncing EFI boot data to EFI partition')
            self.system_efi.sync_data()
        if self.system_boot:
            log.info('--> Syncing boot data at extra partition')
            self.system_boot.sync_data(
                self.__get_exclude_list_for_boot_data_sync()
            )
        log.info('--> Syncing root filesystem data')
        self.system.sync_data(
            self.__get_exclude_list_for_root_data_sync(device_map)
        )
        # install boot loader
        self.__install_bootloader(device_map)
        self.result.add(
            'disk_image', self.diskname
        )
        # create install media if requested
        if self.install_media:
            if self.image_format:
                log.warning('Install image requested, ignoring disk format')
            if self.install_iso or self.install_stick:
                log.info('Creating hybrid ISO installation image')
                self.install_image.create_install_iso()
                self.result.add(
                    'installation_image', self.install_image.isoname
                )
            if self.install_pxe:
                log.info('Creating PXE installation archive')
                self.install_image.create_install_pxe_archive()
                self.result.add(
                    'installation_pxe_archive', self.install_image.pxename
                )
        # create disk image format if requested
        elif self.image_format:
            log.info('Creating %s Disk Format', self.image_format)
            disk_format = DiskFormat(
                self.image_format, self.xml_state,
                self.root_dir, self.target_dir
            )
            disk_format.create_image_format()
            self.result.add(
                'disk_format_image',
                self.target_dir + '/' +
                disk_format.get_target_name_for_format(
                    self.image_format
                )
            )
        return self.result

    def __install_image_requested(self):
        # True when any install media kind is configured; otherwise
        # falls through and returns None (falsy)
        if self.install_iso or self.install_stick or self.install_pxe:
            return True

    def __get_exclude_list_for_root_data_sync(self, device_map):
        # build sync exclude list; boot data is excluded when it lives
        # on its own partition
        exclude_list = [
            'image', '.profile', '.kconfig', 'var/cache/kiwi'
        ]
        if 'boot' in device_map and self.bootloader == 'grub2_s390x_emu':
            exclude_list.append('boot/zipl/*')
            exclude_list.append('boot/zipl/.*')
        elif 'boot' in device_map:
            exclude_list.append('boot/*')
            exclude_list.append('boot/.*')
        return exclude_list

    def __get_exclude_list_for_boot_data_sync(self):
        # efi content is synced separately to the EFI partition
        return ['efi/*']

    def __build_boot_filesystems(self, device_map):
        # create EFI and/or boot filesystems when the partition layout
        # provides dedicated partitions for them
        if 'efi' in device_map:
            log.info(
                'Creating EFI(fat16) filesystem on %s',
                device_map['efi'].get_device()
            )
            filesystem = FileSystem(
                'fat16', device_map['efi'],
                self.root_dir + '/boot/efi/'
            )
            filesystem.create_on_device(
                label=self.disk_setup.get_efi_label()
            )
            self.system_efi = filesystem
        if 'boot' in device_map:
            boot_filesystem = self.requested_boot_filesystem
            if not boot_filesystem:
                boot_filesystem = self.requested_filesystem
            boot_directory = self.root_dir + '/boot/'
            # s390 zipl bootloader requires an ext2 boot/zipl area
            if self.bootloader == 'grub2_s390x_emu':
                boot_directory = self.root_dir + '/boot/zipl/'
                boot_filesystem = 'ext2'
            log.info(
                'Creating boot(%s) filesystem on %s',
                boot_filesystem, device_map['boot'].get_device()
            )
            filesystem = FileSystem(
                boot_filesystem, device_map['boot'],
                boot_directory
            )
            filesystem.create_on_device(
                label=self.disk_setup.get_boot_label()
            )
            self.system_boot = filesystem

    def __build_and_map_disk_partitions(self):
        # wipe and partition the disk according to firmware and setup
        # requirements; returns the partition device map
        self.disk.wipe()
        if self.firmware.legacy_bios_mode():
            log.info('--> creating EFI CSM(legacy bios) partition')
            self.disk.create_efi_csm_partition(
                self.firmware.get_legacy_bios_partition_size()
            )
        if self.firmware.efi_mode():
            log.info('--> creating EFI partition')
            self.disk.create_efi_partition(
                self.firmware.get_efi_partition_size()
            )
        if self.disk_setup.need_boot_partition():
            log.info('--> creating boot partition')
            self.disk.create_boot_partition(
                self.disk_setup.boot_partition_size()
            )
        if self.volume_manager_name and self.volume_manager_name == 'lvm':
            log.info('--> creating LVM root partition')
            self.disk.create_root_lvm_partition('all_free')
        elif self.mdraid:
            log.info('--> creating mdraid root partition')
            self.disk.create_root_raid_partition('all_free')
        else:
            log.info('--> creating root partition')
            self.disk.create_root_partition('all_free')
        if self.firmware.bios_mode():
            log.info('--> setting active flag to primary boot partition')
            self.disk.activate_boot_partition()
        self.disk.map_partitions()
        return self.disk.get_device()

    def __write_partition_id_config_to_boot_image(self):
        # first-stage boot code reads partition ids from config.partids
        log.info('Creating config.partids in boot system')
        filename = self.boot_image_task.boot_root_directory + '/config.partids'
        partition_id_map = self.disk.get_partition_id_map()
        with open(filename, 'w') as partids:
            # NOTE: iteritems() is Python-2-only
            for id_name, id_value in partition_id_map.iteritems():
                partids.write('%s="%s"\n' % (id_name, id_value))

    def __write_raid_config_to_boot_image(self):
        if self.mdraid:
            log.info('Creating etc/mdadm.conf in boot system')
            self.raid_root.create_raid_config(
                self.boot_image_task.boot_root_directory + '/etc/mdadm.conf'
            )

    def __write_crypttab_to_system_image(self):
        if self.luks:
            log.info('Creating etc/crypttab')
            self.luks_root.create_crypttab(
                self.root_dir + '/etc/crypttab'
            )

    def __write_image_identifier_to_system_image(self):
        log.info('Creating image identifier: %s', self.mbrid.get_id())
        self.mbrid.write(
            self.root_dir + '/boot/mbrid'
        )

    def __write_recovery_metadata_to_boot_image(self):
        if os.path.exists(self.root_dir + '/recovery.partition.size'):
            log.info('Copying recovery metadata to boot image')
            Command.run(
                [
                    'cp', self.root_dir + '/recovery.partition.size',
                    self.boot_image_task.boot_root_directory
                ]
            )

    def __write_bootloader_config_to_system_image(self, device_map):
        log.info('Creating %s bootloader configuration', self.bootloader)
        # boot data may live on a dedicated partition, else on root
        boot_device = device_map['root']
        if 'boot' in device_map:
            boot_device = device_map['boot']
        partition_id_map = self.disk.get_partition_id_map()
        boot_partition_id = partition_id_map['kiwi_RootPart']
        if 'kiwi_BootPart' in partition_id_map:
            boot_partition_id = partition_id_map['kiwi_BootPart']
        boot_uuid = self.disk.get_uuid(
            boot_device.get_device()
        )
        self.bootloader_config.setup_disk_boot_images(boot_uuid)
        self.bootloader_config.setup_disk_image_config(boot_uuid)
        self.bootloader_config.write()
        # allow the image description to post-process the boot config
        self.system_setup.call_edit_boot_config_script(
            self.requested_filesystem, boot_partition_id
        )

    def __install_bootloader(self, device_map):
        boot_device = device_map['root']
        custom_install_arguments = {}
        if 'boot' in device_map:
            boot_device = device_map['boot']
            custom_install_arguments['boot_device'] = boot_device.get_device()
        bootloader = BootLoaderInstall(
            self.bootloader, self.root_dir, self.disk.storage_provider,
            custom_install_arguments
        )
        bootloader.install()
        # allow the image description to post-process the installation
        self.system_setup.call_edit_boot_install_script(
            self.diskname, boot_device.get_device()
        )

    def __copy_first_boot_files_to_system_image(self):
        # copy kernel (and Xen hypervisor for dom0) plus initrd into
        # the system image under well-known first-boot names
        log.info('Copy boot files to system image')
        kernel = Kernel(self.boot_image_task.boot_root_directory)
        if kernel.get_kernel():
            log.info('--> boot image kernel as first boot linux.vmx')
            kernel.copy_kernel(
                self.root_dir, '/boot/linux.vmx'
            )
        else:
            raise KiwiDiskBootImageError(
                'No kernel in boot image tree %s found' %
                self.boot_image_task.boot_root_directory
            )
        if self.machine and self.machine.get_domain() == 'dom0':
            if kernel.get_xen_hypervisor():
                log.info('--> boot image Xen hypervisor as xen.gz')
                kernel.copy_xen_hypervisor(
                    self.root_dir, '/boot/xen.gz'
                )
            else:
                raise KiwiDiskBootImageError(
                    'No hypervisor in boot image tree %s found' %
                    self.boot_image_task.boot_root_directory
                )
        log.info('--> initrd archive as first boot initrd.vmx')
        Command.run(
            [
                'mv', self.boot_image_task.initrd_filename,
                self.root_dir + '/boot/initrd.vmx'
            ]
        )
class LiveImageBuilder(object):
    """
    Live image builder

    Builds a (optionally hybrid) live ISO from a prepared root tree:
    packs the system into a read-only live filesystem, sets up
    isolinux (and grub2 for EFI) boot configurations, creates the
    boot initrd and finally the ISO image
    """
    def __init__(self, xml_state, target_dir, root_dir):
        # media_dir stays None until create() allocates the temporary
        # ISO staging directory; __del__ keys its cleanup on this
        self.media_dir = None
        self.arch = platform.machine()
        self.root_dir = root_dir
        self.target_dir = target_dir
        self.xml_state = xml_state
        # requested live iso type, taken from the build type flags
        self.live_type = xml_state.build_type.get_flags()
        self.types = Defaults.get_live_iso_types()
        self.hybrid = xml_state.build_type.get_hybrid()
        self.volume_id = xml_state.build_type.get_volid()
        self.machine = xml_state.get_build_type_machine_section()
        self.mbrid = ImageIdentifier()
        self.mbrid.calculate_id()
        if not self.live_type:
            self.live_type = Defaults.get_default_live_iso_type()
        self.boot_image_task = BootImageTask('kiwi', xml_state, target_dir)
        self.firmware = FirmWare(xml_state)
        self.system_setup = SystemSetup(
            xml_state=xml_state, description_dir=None, root_dir=self.root_dir
        )
        # <name>.<arch>-<version>.iso below target_dir
        self.isoname = ''.join([
            target_dir, '/', xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(),
            '.iso'
        ])
        # file holding the read-only live root filesystem image
        self.live_image_file = ''.join([
            target_dir, '/', xml_state.xml_data.get_name(),
            '-read-only.', self.arch, '-', xml_state.get_image_version()
        ])
        self.result = Result()

    def create(self):
        """
        Build the live ISO and return the Result instance with the
        'live_image' artifact registered.

        :raises KiwiLiveBootImageError: if the live type is unsupported
        """
        # media dir to store CD contents
        self.media_dir = mkdtemp(prefix='live-media.', dir=self.target_dir)
        rootsize = SystemSize(self.media_dir)
        # custom iso metadata
        log.info('Using following live ISO metadata:')
        log.info('--> Application id: %s', self.mbrid.get_id())
        log.info('--> Publisher: %s', Defaults.get_publisher())
        custom_iso_args = [
            '-A', self.mbrid.get_id(),
            '-p', '"' + Defaults.get_preparer() + '"',
            '-publisher', '"' + Defaults.get_publisher() + '"',
        ]
        if self.volume_id:
            log.info('--> Volume id: %s', self.volume_id)
            custom_iso_args.append('-V')
            custom_iso_args.append('"' + self.volume_id + '"')
        # prepare boot(initrd) root system
        log.info('Preparing live ISO boot system')
        self.boot_image_task.prepare()
        # export modprobe configuration to boot image
        self.system_setup.export_modprobe_setup(
            self.boot_image_task.boot_root_directory)
        # pack system into live boot structure
        log.info('Packing system into live ISO type: %s', self.live_type)
        if self.live_type in self.types:
            live_type_image = FileSystem(
                name=self.types[self.live_type],
                device_provider=None,
                root_dir=self.root_dir
            )
            live_type_image.create_on_file(self.live_image_file)
            Command.run(['mv', self.live_image_file, self.media_dir])
            self.__create_live_iso_client_config(self.live_type)
        else:
            raise KiwiLiveBootImageError(
                'live ISO type "%s" not supported' % self.live_type
            )
        # setup bootloader config to boot the ISO via isolinux
        log.info('Setting up isolinux bootloader configuration')
        bootloader_config_isolinux = BootLoaderConfig(
            'isolinux', self.xml_state, self.media_dir
        )
        bootloader_config_isolinux.setup_live_boot_images(
            mbrid=None,
            lookup_path=self.boot_image_task.boot_root_directory
        )
        bootloader_config_isolinux.setup_live_image_config(mbrid=None)
        bootloader_config_isolinux.write()
        # setup bootloader config to boot the ISO via EFI
        if self.firmware.efi_mode():
            log.info('Setting up EFI grub bootloader configuration')
            bootloader_config_grub = BootLoaderConfig(
                'grub2', self.xml_state, self.media_dir
            )
            bootloader_config_grub.setup_live_boot_images(
                mbrid=self.mbrid,
                lookup_path=self.boot_image_task.boot_root_directory
            )
            bootloader_config_grub.setup_live_image_config(mbrid=self.mbrid)
            bootloader_config_grub.write()
        # create initrd for live image
        log.info('Creating live ISO boot image')
        self.__create_live_iso_kernel_and_initrd()
        # calculate size and decide if we need UDF
        if rootsize.accumulate_mbyte_file_sizes() > 4096:
            log.info('ISO exceeds 4G size, using UDF filesystem')
            custom_iso_args.append('-allow-limited-size')
            custom_iso_args.append('-udf')
        # create iso filesystem from media_dir
        log.info('Creating live ISO image')
        iso_image = FileSystemIsoFs(
            device_provider=None,
            root_dir=self.media_dir,
            custom_args=custom_iso_args
        )
        iso_header_offset = iso_image.create_on_file(self.isoname)
        # make it hybrid
        if self.hybrid:
            Iso.create_hybrid(iso_header_offset, self.mbrid, self.isoname)
        self.result.add('live_image', self.isoname)
        return self.result

    def __create_live_iso_kernel_and_initrd(self):
        """
        Copy kernel (and Xen hypervisor for dom0 builds) into the ISO
        loader path and create/move the boot initrd there.

        :raises KiwiLiveBootImageError:
            if kernel or (for dom0 builds) hypervisor is missing
        """
        # NOTE(review): loader path is hard coded to x86_64 although
        # self.arch is tracked — confirm for non-x86_64 targets
        boot_path = self.media_dir + '/boot/x86_64/loader'
        Path.create(boot_path)
        kernel = Kernel(self.boot_image_task.boot_root_directory)
        if kernel.get_kernel():
            kernel.copy_kernel(boot_path, '/linux')
        else:
            raise KiwiLiveBootImageError(
                'No kernel in boot image tree %s found' %
                self.boot_image_task.boot_root_directory)
        if self.machine and self.machine.get_domain() == 'dom0':
            if kernel.get_xen_hypervisor():
                kernel.copy_xen_hypervisor(boot_path, '/xen.gz')
            else:
                raise KiwiLiveBootImageError(
                    'No hypervisor in boot image tree %s found' %
                    self.boot_image_task.boot_root_directory)
        self.boot_image_task.create_initrd(self.mbrid)
        Command.run([
            'mv', self.boot_image_task.initrd_filename,
            boot_path + '/initrd'
        ])

    def __create_live_iso_client_config(self, iso_type):
        """
        Setup IMAGE and UNIONFS_CONFIG variables as they are used in
        the kiwi isoboot code. Variable contents:

        + IMAGE=target_device;live_iso_name_definition
        + UNIONFS_CONFIG=rw_device,ro_device,union_type

        If no real block device is used or can be predefined the word
        'loop' is set as a placeholder or indicator to use a loop device.
        For more details please refer to the kiwi shell boot code
        """
        iso_client_config_file = self.media_dir + '/config.isoclient'
        iso_client_params = Defaults.get_live_iso_client_parameters()
        (system_device, union_device, union_type) = iso_client_params[iso_type]
        with open(iso_client_config_file, 'w') as config:
            config.write(
                'IMAGE="%s;%s.%s;%s"\n' % (
                    system_device,
                    self.xml_state.xml_data.get_name(),
                    self.arch,
                    self.xml_state.get_image_version()
                )
            )
            config.write(
                'UNIONFS_CONFIG="%s,loop,%s"\n' % (union_device, union_type)
            )

    def __del__(self):
        # best-effort removal of the temporary media directory;
        # media_dir is only set once create() ran
        if self.media_dir:
            log.info('Cleaning up %s instance', type(self).__name__)
            Path.wipe(self.media_dir)
def predict(self, word, audio_file, pronunciation=''):
    """
    Predict pronunciations for *word* from its spelling and audio.

    Always records grapheme-based and acoustic n-best hypotheses on the
    returned Result. In explore mode one interpolated list per gamma in
    self.gamma_range is added; otherwise a single 'full' interpolation
    with self.gamma is added.
    """
    result = Result(word, pronunciation)

    def best_hypotheses(n_best):
        # wrap the top output_n entries as Hypothesis objects
        return [Hypothesis(entry) for entry in n_best[:self.output_n]]

    # grapheme prediction is common to both modes
    grapheme_n_best = self.gm.predict(word, self.n)
    result.add('grapheme', best_hypotheses(grapheme_n_best))

    if self.explore:
        acoustic_n_best = self.am.predict(
            audio_file, self.n, word, grapheme_n_best
        )
        result.add('acoustic', best_hypotheses(acoustic_n_best))
        # sweep the interpolation weight to explore the tradeoff
        for gamma in self.gamma_range:
            combined = self.interpolate(
                grapheme_n_best, acoustic_n_best, gamma
            )
            result.add('gamma' + str(gamma), best_hypotheses(combined))
    else:
        # plain acoustic decoding, unconstrained by the grapheme output
        acoustic_results = self.am.predict(audio_file, self.n)[:self.output_n]
        result.add('acoustic', [Hypothesis(entry) for entry in acoustic_results])
        # grapheme-constrained acoustic pass, then fixed-gamma interpolation
        rescored = self.am.predict(audio_file, self.n, word, grapheme_n_best)
        combined = self.interpolate(grapheme_n_best, rescored, self.gamma)
        result.add('full', best_hypotheses(combined))

    return result
class FileAnalyzer():
    '''
    Analyzes a file against pattern dictionaries and collects match
    ranges plus optional repeated-byte runs into a Result.
    '''

    def __init__(self, filename, chunksize=4 * utils.MB):
        '''
        Initiates a file analyzer

        \nArgs:
        \n\t filename (str): the filename to analyze
        \n\t chunksize (int): the size of chunks to be read at a time
        from the file \t(default is 4,194,304 bytes (4MB))
        '''
        self.filename = filename
        # NOTE(review): the f-string has no placeholder — presumably
        # this was meant to interpolate the analyzed filename; confirm
        self.res = Result(f"(unknown)_patterns.json")
        self.chunksize = chunksize
        # name -> {absolute start offset: longest match length}
        self.match_ranges = {}

    def find_patterns(self, dic, upto_offset=0, repeating=0) -> None:
        '''
        Find the patterns from the dictionary in the file up to the
        given offset and add them to the result

        \nArgs:
        \n\tdic (dict): The pattern dictionary
        \n\t`upto_offset` (int): the offset of the last byte to be read
        \t (default is 0 (read the whole file))
        \n\t repeating (int): the shortest repeated byte sequence to
        write down \t (default is 0 (don't search for repeated bytes))
        '''
        # buffer holds 2 chunks so a match spanning two adjacent
        # chunks is still seen in one window
        buff = Buffer(self.chunksize * 2)
        if repeating:
            # two hex chars per byte, repeated at least `repeating` times
            repet_ex = re.compile(r"(..)\1{" + str(repeating) + ",}")
        # re searches faster for pre-compiled expressions
        # NOTE(review): `regex` is built but never used below —
        # pattern_ranges_in_buffer() is called with the raw `dic`;
        # confirm whether `regex` was meant to be passed instead
        regex = {re.compile(key): val for key, val in dic.items()}
        for i, chunk in enumerate(
                utils.file_to_chunks_generator(
                    self.filename,
                    chunksize=self.chunksize,
                    upto_offset=upto_offset)):
            buff.update(chunk)
            new_ranges = buff.pattern_ranges_in_buffer(dic)
            self.update_ranges(new_ranges, i)
            if repeating:
                # offsets are halved: two hex characters encode one byte
                for match in re.finditer(repet_ex, "".join(chunk)):
                    # NOTE(review): int() parses base 10 here; if the
                    # captured pair is hex text this likely needs a
                    # base-16 conversion — confirm
                    self.res.add(
                        "reapiting byte",
                        i+match.start()//2,
                        match.end()//2-match.start()//2,
                        repeater=utils.format_byte(
                            hex(int(match.groups()[0]))))
        # flush all accumulated pattern ranges into the result
        self.res.add_from_dict(self.match_ranges)

    def write_results(self) -> None:
        # delegate persistence to the Result instance
        self.res.write_to_file()

    def update_ranges(self, new_ranges, current_chunk_index) -> None:
        '''
        Updates the current ranges according to the new ranges,
        translating buffer-relative spans to absolute file offsets
        and keeping the longest match per start offset

        \nArgs:
        \n\t `new_ranges` (dict): a dictionary from the \t name to a
        dictionary of the spans of the matches
        \n\t current_chunk_index (int)
        '''
        for name, rng_list in new_ranges.items():
            for rng in rng_list:
                # NOTE(review): the -3 chunk-index correction and the
                # //2 hex-to-byte scaling depend on Buffer's window
                # layout — confirm against Buffer implementation
                start = rng["start"] + \
                    (current_chunk_index-3) * self.chunksize//2
                length = (rng["end"] - rng["start"])
                if name not in self.match_ranges:
                    self.match_ranges[name] = {}
                if start in self.match_ranges[name]:
                    # keep the longest match seen for this offset
                    length = max(length, self.match_ranges[name][start])
                self.match_ranges[name][start] = length
data = db.filter(UPDATE_TIME=dates_train, HOUR_ID=hour, SERVER_NAME=server) if len(data) == 0: continue x_train = [row['UPDATE_TIME'] for row in data] y1_train = [row['BANDWIDTH_TOTAL'] for row in data] y2_train = [row['MAX_USER'] for row in data] dates_predict = list(week_range(start_date_predict, end_date_predict, weekday)) model = Model() model.fit(x_train, y1_train, y2_train) y1_predict, y2_predict = model.predict(dates_predict) for date, y1, y2 in zip(dates_predict, y1_predict, y2_predict): result.add(date, hour, server, y1, y2) pbar.close() print() submission = Submission() print('Prepare submission...') with open('data/test_id.csv', 'r') as test_file: reader = csv.DictReader(test_file) for row in tqdm(reader): test_id = row['id'] update_time = datetime.strptime(row['UPDATE_TIME'], '%Y-%m-%d').date() hour = int(row['HOUR_ID']) server = row['SERVER_NAME'] bandwidth, max_user = result.get(update_time, hour, server)
def test_can_add_word(self):
    """A single added word shows up as one (word, 1) entry and bumps count."""
    result = Result()
    result.add("foo", None)
    self.assertEqual([("foo", 1)], result.get_list())
    self.assertEqual(1, result.count)
class LiveImageBuilder(object):
    """
    Live image builder

    Builds a (optionally hybrid) live ISO from a prepared root tree:
    packs the system into a read-only live filesystem, sets up
    isolinux (and grub2 for EFI) boot configurations, creates the
    boot initrd and finally the ISO image
    """
    def __init__(self, xml_state, target_dir, root_dir):
        # media_dir stays None until create() allocates the temporary
        # ISO staging directory; __del__ keys its cleanup on this
        self.media_dir = None
        self.arch = platform.machine()
        self.root_dir = root_dir
        self.target_dir = target_dir
        self.xml_state = xml_state
        # requested live iso type, taken from the build type flags
        self.live_type = xml_state.build_type.get_flags()
        self.types = Defaults.get_live_iso_types()
        self.hybrid = xml_state.build_type.get_hybrid()
        self.volume_id = xml_state.build_type.get_volid()
        self.machine = xml_state.get_build_type_machine_section()
        self.mbrid = ImageIdentifier()
        self.mbrid.calculate_id()
        if not self.live_type:
            self.live_type = Defaults.get_default_live_iso_type()
        self.boot_image_task = BootImageTask(
            'kiwi', xml_state, target_dir
        )
        self.firmware = FirmWare(
            xml_state
        )
        self.system_setup = SystemSetup(
            xml_state=xml_state,
            description_dir=None,
            root_dir=self.root_dir
        )
        # <name>.<arch>-<version>.iso below target_dir
        self.isoname = ''.join(
            [
                target_dir, '/',
                xml_state.xml_data.get_name(),
                '.' + platform.machine(),
                '-' + xml_state.get_image_version(),
                '.iso'
            ]
        )
        # file holding the read-only live root filesystem image
        self.live_image_file = ''.join(
            [
                target_dir, '/',
                xml_state.xml_data.get_name(),
                '-read-only.', self.arch,
                '-', xml_state.get_image_version()
            ]
        )
        self.result = Result()

    def create(self):
        """
        Build the live ISO and return the Result instance with the
        'live_image' artifact registered.

        :raises KiwiLiveBootImageError: if the live type is unsupported
        """
        # media dir to store CD contents
        self.media_dir = mkdtemp(
            prefix='live-media.', dir=self.target_dir
        )
        rootsize = SystemSize(self.media_dir)
        # custom iso metadata
        log.info('Using following live ISO metadata:')
        log.info('--> Application id: %s', self.mbrid.get_id())
        log.info('--> Publisher: %s', Defaults.get_publisher())
        custom_iso_args = [
            '-A', self.mbrid.get_id(),
            '-p', '"' + Defaults.get_preparer() + '"',
            '-publisher', '"' + Defaults.get_publisher() + '"',
        ]
        if self.volume_id:
            log.info('--> Volume id: %s', self.volume_id)
            custom_iso_args.append('-V')
            custom_iso_args.append('"' + self.volume_id + '"')
        # prepare boot(initrd) root system
        log.info('Preparing live ISO boot system')
        self.boot_image_task.prepare()
        # export modprobe configuration to boot image
        self.system_setup.export_modprobe_setup(
            self.boot_image_task.boot_root_directory
        )
        # pack system into live boot structure
        log.info('Packing system into live ISO type: %s', self.live_type)
        if self.live_type in self.types:
            live_type_image = FileSystem(
                name=self.types[self.live_type],
                device_provider=None,
                root_dir=self.root_dir
            )
            live_type_image.create_on_file(self.live_image_file)
            Command.run(
                ['mv', self.live_image_file, self.media_dir]
            )
            self.__create_live_iso_client_config(self.live_type)
        else:
            raise KiwiLiveBootImageError(
                'live ISO type "%s" not supported' % self.live_type
            )
        # setup bootloader config to boot the ISO via isolinux
        log.info('Setting up isolinux bootloader configuration')
        bootloader_config_isolinux = BootLoaderConfig(
            'isolinux', self.xml_state, self.media_dir
        )
        bootloader_config_isolinux.setup_live_boot_images(
            mbrid=None,
            lookup_path=self.boot_image_task.boot_root_directory
        )
        bootloader_config_isolinux.setup_live_image_config(
            mbrid=None
        )
        bootloader_config_isolinux.write()
        # setup bootloader config to boot the ISO via EFI
        if self.firmware.efi_mode():
            log.info('Setting up EFI grub bootloader configuration')
            bootloader_config_grub = BootLoaderConfig(
                'grub2', self.xml_state, self.media_dir
            )
            bootloader_config_grub.setup_live_boot_images(
                mbrid=self.mbrid,
                lookup_path=self.boot_image_task.boot_root_directory
            )
            bootloader_config_grub.setup_live_image_config(
                mbrid=self.mbrid
            )
            bootloader_config_grub.write()
        # create initrd for live image
        log.info('Creating live ISO boot image')
        self.__create_live_iso_kernel_and_initrd()
        # calculate size and decide if we need UDF
        if rootsize.accumulate_mbyte_file_sizes() > 4096:
            log.info('ISO exceeds 4G size, using UDF filesystem')
            custom_iso_args.append('-allow-limited-size')
            custom_iso_args.append('-udf')
        # create iso filesystem from media_dir
        log.info('Creating live ISO image')
        iso_image = FileSystemIsoFs(
            device_provider=None,
            root_dir=self.media_dir,
            custom_args=custom_iso_args
        )
        iso_header_offset = iso_image.create_on_file(self.isoname)
        # make it hybrid
        if self.hybrid:
            Iso.create_hybrid(
                iso_header_offset, self.mbrid, self.isoname
            )
        self.result.add(
            'live_image', self.isoname
        )
        return self.result

    def __create_live_iso_kernel_and_initrd(self):
        """
        Copy kernel (and Xen hypervisor for dom0 builds) into the ISO
        loader path and create/move the boot initrd there.

        :raises KiwiLiveBootImageError:
            if kernel or (for dom0 builds) hypervisor is missing
        """
        # NOTE(review): loader path is hard coded to x86_64 although
        # self.arch is tracked — confirm for non-x86_64 targets
        boot_path = self.media_dir + '/boot/x86_64/loader'
        Path.create(boot_path)
        kernel = Kernel(self.boot_image_task.boot_root_directory)
        if kernel.get_kernel():
            kernel.copy_kernel(boot_path, '/linux')
        else:
            raise KiwiLiveBootImageError(
                'No kernel in boot image tree %s found' %
                self.boot_image_task.boot_root_directory
            )
        if self.machine and self.machine.get_domain() == 'dom0':
            if kernel.get_xen_hypervisor():
                kernel.copy_xen_hypervisor(boot_path, '/xen.gz')
            else:
                raise KiwiLiveBootImageError(
                    'No hypervisor in boot image tree %s found' %
                    self.boot_image_task.boot_root_directory
                )
        self.boot_image_task.create_initrd(self.mbrid)
        Command.run(
            [
                'mv', self.boot_image_task.initrd_filename,
                boot_path + '/initrd'
            ]
        )

    def __create_live_iso_client_config(self, iso_type):
        """
        Setup IMAGE and UNIONFS_CONFIG variables as they are used in
        the kiwi isoboot code. Variable contents:

        + IMAGE=target_device;live_iso_name_definition
        + UNIONFS_CONFIG=rw_device,ro_device,union_type

        If no real block device is used or can be predefined the word
        'loop' is set as a placeholder or indicator to use a loop device.
        For more details please refer to the kiwi shell boot code
        """
        iso_client_config_file = self.media_dir + '/config.isoclient'
        iso_client_params = Defaults.get_live_iso_client_parameters()
        (system_device, union_device, union_type) = iso_client_params[iso_type]
        with open(iso_client_config_file, 'w') as config:
            config.write(
                'IMAGE="%s;%s.%s;%s"\n' % (
                    system_device,
                    self.xml_state.xml_data.get_name(),
                    self.arch,
                    self.xml_state.get_image_version()
                )
            )
            config.write(
                'UNIONFS_CONFIG="%s,loop,%s"\n' % (union_device, union_type)
            )

    def __del__(self):
        # best-effort removal of the temporary media directory;
        # media_dir is only set once create() ran
        if self.media_dir:
            log.info('Cleaning up %s instance', type(self).__name__)
            Path.wipe(self.media_dir)
class PxeBuilder(object):
    """
    Filesystem based PXE image builder.

    This results in creating a boot image(initrd) plus its
    appropriate kernel files and the root filesystem image with
    a checksum. The result can be used within the kiwi PXE boot
    infrastructure
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.target_dir = target_dir
        # whether the root filesystem image gets xz compressed
        self.compressed = xml_state.build_type.get_compressed()
        self.image_name = xml_state.xml_data.get_name()
        self.machine = xml_state.get_build_type_machine_section()
        self.pxedeploy = xml_state.get_build_type_pxedeploy_section()
        self.filesystem = FileSystemBuilder(
            xml_state, target_dir, root_dir
        )
        self.system_setup = SystemSetup(
            xml_state=xml_state, description_dir=None, root_dir=root_dir
        )
        self.boot_image_task = BootImageTask(
            'kiwi', xml_state, target_dir
        )
        # filled in by create() when the artifacts exist
        self.kernel_filename = None
        self.hypervisor_filename = None
        self.result = Result()

    def create(self):
        """
        Build all PXE artifacts (root filesystem image, md5 checksum,
        kernel, optional Xen hypervisor, initrd) and return the Result
        instance with them registered.

        :raises KiwiPxeBootImageError:
            if kernel or (for dom0 builds) hypervisor is missing
        """
        log.info('Creating PXE root filesystem image')
        self.filesystem.create()
        self.image = self.filesystem.filename
        if self.compressed:
            log.info('xz compressing root filesystem image')
            compress = Compress(self.image)
            compress.xz()
            self.image = compress.compressed_filename
        log.info('Creating PXE root filesystem MD5 checksum')
        # NOTE(review): the checksum is computed over self.image (which
        # may be the xz compressed file) but written next to the
        # uncompressed filesystem filename — confirm this is intended
        self.filesystem_checksum = self.filesystem.filename + '.md5'
        checksum = Checksum(self.image)
        checksum.md5(self.filesystem_checksum)
        # prepare boot(initrd) root system
        log.info('Creating PXE boot image')
        self.boot_image_task.prepare()
        # export modprobe configuration to boot image
        self.system_setup.export_modprobe_setup(
            self.boot_image_task.boot_root_directory)
        # extract kernel from boot(initrd) root system
        kernel = Kernel(self.boot_image_task.boot_root_directory)
        kernel_data = kernel.get_kernel()
        if kernel_data:
            self.kernel_filename = ''.join(
                [self.image_name, '-', kernel_data.version, '.kernel'])
            kernel.copy_kernel(self.target_dir, self.kernel_filename)
        else:
            raise KiwiPxeBootImageError(
                'No kernel in boot image tree %s found' %
                self.boot_image_task.boot_root_directory)
        # extract hypervisor from boot(initrd) root system
        # only expected for Xen dom0 builds
        if self.machine and self.machine.get_domain() == 'dom0':
            kernel_data = kernel.get_xen_hypervisor()
            if kernel_data:
                self.hypervisor_filename = ''.join(
                    [self.image_name, '-', kernel_data.name])
                kernel.copy_xen_hypervisor(
                    self.target_dir, self.hypervisor_filename)
                self.result.add('xen_hypervisor', self.hypervisor_filename)
            else:
                raise KiwiPxeBootImageError(
                    'No hypervisor in boot image tree %s found' %
                    self.boot_image_task.boot_root_directory)
        # create initrd for pxe boot
        self.boot_image_task.create_initrd()
        # register all artifacts on the result
        self.result.add('kernel', self.kernel_filename)
        self.result.add('initrd', self.boot_image_task.initrd_filename)
        self.result.add('filesystem_image', self.image)
        self.result.add('filesystem_md5', self.filesystem_checksum)
        if self.pxedeploy:
            log.warning(
                'Creation of client config file from pxedeploy not implemented'
            )
        return self.result