def prepare(self):
    """
    Prepare a new root system suitable to create an initrd from it.

    Loads the boot image XML description, merges the relevant
    elements of the system description into it, then installs and
    configures the boot root tree.
    """
    self.__load_boot_xml_description()
    boot_image_name = self.boot_xml_state.xml_data.get_name()
    self.__import_system_description_elements()
    log.info('Preparing boot image')
    system = System(
        xml_state=self.boot_xml_state,
        root_dir=self.boot_root_directory,
        allow_existing=True
    )
    manager = system.setup_repositories()
    system.install_bootstrap(manager)
    system.install_system(manager)
    # export the initrd name so the boot code can find its artifacts
    profile = Profile(self.boot_xml_state)
    profile.add('kiwi_initrdname', boot_image_name)
    defaults = Defaults()
    defaults.to_profile(profile)
    setup = SystemSetup(
        self.boot_xml_state,
        self.__boot_description_directory(),
        self.boot_root_directory
    )
    setup.import_shell_environment(profile)
    setup.import_description()
    setup.import_overlay_files(follow_links=True)
    setup.call_config_script()
    # NOTE(review): forced pinch presumably strips the packages marked
    # for deletion before the image script runs — confirm
    system.pinch_system(manager=manager, force=True)
    setup.call_image_script()
    setup.create_init_link_from_linuxrc()
def __init__(self, xml_state, target_dir, root_dir):
    """
    Collect all data needed to build a live ISO image.

    :param object xml_state: image description state
    :param string target_dir: output directory for image results
    :param string root_dir: prepared root tree directory
    """
    self.media_dir = None
    self.arch = platform.machine()
    self.root_dir = root_dir
    self.target_dir = target_dir
    self.xml_state = xml_state
    self.live_type = xml_state.build_type.get_flags()
    self.types = Defaults.get_live_iso_types()
    self.hybrid = xml_state.build_type.get_hybrid()
    self.volume_id = xml_state.build_type.get_volid()
    self.machine = xml_state.get_build_type_machine_section()
    # identifier written to the ISO master boot record
    self.mbrid = ImageIdentifier()
    self.mbrid.calculate_id()
    if not self.live_type:
        self.live_type = Defaults.get_default_live_iso_type()
    self.boot_image_task = BootImageTask('kiwi', xml_state, target_dir)
    self.firmware = FirmWare(xml_state)
    self.system_setup = SystemSetup(
        xml_state=xml_state, description_dir=None, root_dir=self.root_dir
    )
    # <target_dir>/<name>.<arch>-<version>.iso
    self.isoname = ''.join([
        target_dir, '/', xml_state.xml_data.get_name(),
        '.' + platform.machine(),
        '-' + xml_state.get_image_version(), '.iso'
    ])
    # <target_dir>/<name>-read-only.<arch>-<version>
    self.live_image_file = ''.join([
        target_dir, '/', xml_state.xml_data.get_name(),
        '-read-only.', self.arch, '-', xml_state.get_image_version()
    ])
    self.result = Result()
def process(self):
    """
    Prepare a new system image root tree: load the XML description,
    apply repository overrides from the command line, install the
    system and run the configuration steps inside the new root.
    """
    self.manual = Help()
    if self.__help():
        return
    Privileges.check_for_root_permissions()
    self.load_xml_description(self.command_args['--description'])
    # --set-repo replaces the first configured repository
    if self.command_args['--set-repo']:
        (repo_source, repo_type, repo_alias, repo_prio) = \
            self.quadruple_token(self.command_args['--set-repo'])
        self.xml_state.set_repository(
            repo_source, repo_type, repo_alias, repo_prio
        )
    # --add-repo may be given multiple times
    if self.command_args['--add-repo']:
        for add_repo in self.command_args['--add-repo']:
            (repo_source, repo_type, repo_alias, repo_prio) = \
                self.quadruple_token(add_repo)
            self.xml_state.add_repository(
                repo_source, repo_type, repo_alias, repo_prio
            )
    if os.path.exists('/.buildenv'):
        # This build runs inside of a buildservice worker. Therefore
        # the repo definitions are adapted accordingly
        self.xml_state.translate_obs_to_suse_repositories()
    elif self.command_args['--obs-repo-internal']:
        # This build should use the internal SUSE buildservice.
        # Be aware that the buildhost has to provide access
        self.xml_state.translate_obs_to_ibs_repositories()
    log.info('Preparing system')
    system = System(
        self.xml_state,
        self.command_args['--root'],
        self.command_args['--allow-existing-root']
    )
    manager = system.setup_repositories()
    system.install_bootstrap(manager)
    system.install_system(manager)
    profile = Profile(self.xml_state)
    defaults = Defaults()
    defaults.to_profile(profile)
    setup = SystemSetup(
        self.xml_state,
        self.command_args['--description'],
        self.command_args['--root']
    )
    setup.import_shell_environment(profile)
    setup.import_description()
    setup.import_overlay_files()
    setup.call_config_script()
    setup.import_image_identifier()
    setup.setup_groups()
    setup.setup_users()
    setup.setup_keyboard_map()
    setup.setup_locale()
    setup.setup_timezone()
    system.pinch_system(manager)
def setup_farmrole_params(
        role_options=None, alias=None, behaviors=None,
        setup_bundled_role=False):
    """
    Build a FarmRoleParams object for the given role options,
    behaviors and alias.
    """
    platform = CONF.feature.platform
    dist = CONF.feature.dist
    behaviors = behaviors or []
    role_options = role_options or []
    role_params = farmrole.FarmRoleParams(platform, alias=alias)
    # NOTE: types.StringTypes is Python 2 only
    if isinstance(behaviors, types.StringTypes):
        behaviors = [behaviors]
    # presumably skips the hostname template when a bundled-role name
    # would reach 63 chars — confirm intent
    if not (setup_bundled_role
            and len('{}-{}'.format(world.farm.name, alias)) < 63):
        Defaults.set_hostname(role_params)
    for opt in role_options:
        LOG.info('Inspect role option: %s' % opt)
        if opt in ('branch_latest', 'branch_stable'):
            role_params.advanced.agent_update_repository = opt.split('_')[1]
        elif 'redis processes' in opt:
            redis_count = re.findall(r'(\d+) redis processes', opt)[0].strip()
            LOG.info('Setup %s redis processes' % redis_count)
            role_params.database.redis_processes = int(redis_count)
        elif 'chef-solo' in opt:
            Defaults.set_chef_solo(role_params, opt)
        else:
            Defaults.apply_option(role_params, opt)
    if not setup_bundled_role:
        if dist.is_windows:
            role_params.advanced.reboot_after_hostinit = True
        elif dist.id == 'scientific-6-x' or \
                (dist.id in ['centos-6-x', 'centos-7-x'] and platform.is_ec2):
            role_params.advanced.disable_iptables_mgmt = False
        if platform.is_ec2:
            # NOTE(review): append() is called with two arguments here;
            # looks wrong unless 'variables' provides a custom append —
            # confirm against the farmrole implementation
            role_params.global_variables.variables.append(
                role_params.global_variables,
                farmrole.Variable(
                    name='REVIZOR_TEST_ID',
                    value=getattr(world, 'test_id')
                )
            )
    if 'rabbitmq' in behaviors:
        role_params.network.hostname_template = ''
    if any(b in DATABASE_BEHAVIORS for b in behaviors):
        LOG.debug('Setup default db storages')
        Defaults.set_db_storage(role_params)
    if 'redis' in behaviors:
        LOG.info('Insert redis settings')
        snapshotting_type = CONF.feature.redis_snapshotting
        role_params.database.redis_persistence_type = snapshotting_type
        role_params.database.redis_use_password = True
    return role_params
def __init__(self, wrfout, ax=0, fig=0, plotn=(1, 1), layout='normal'):
    """
    Set up a figure for plotting WRF output.

    wrfout : data source (stored as self.W)
    ax, fig : reuse an existing axes/figure pair when both are given
    plotn : (rows, cols) for the default subplot grid
    layout : 'normal', 'insetv' (side inset) or 'inseth' (top inset)
    """
    self.W = wrfout
    self.D = Defaults()
    # Create main figure
    if ax and fig:
        self.ax = ax
        self.fig = fig
    elif layout == 'insetv':
        # narrow panel left of the main panel
        self.fig = plt.figure(figsize=(8, 6))
        self.gs = M.gridspec.GridSpec(1, 2, width_ratios=[1, 3])
        self.ax0 = plt.subplot(self.gs[0])
        self.ax1 = plt.subplot(self.gs[1])
    elif layout == 'inseth':
        # shallow panel above the main panel
        self.fig = plt.figure(figsize=(6, 8))
        self.gs = M.gridspec.GridSpec(2, 1, height_ratios=[1, 3])
        self.ax0 = plt.subplot(self.gs[0])
        self.ax1 = plt.subplot(self.gs[1])
    else:
        self.fig, self.ax = plt.subplots(nrows=plotn[0], ncols=plotn[1])
    self.fig.set_dpi(self.D.dpi)
def __import_system_description_elements(self):
    """
    Copy the relevant sections of the system XML description into
    the boot image XML description so the initrd configuration
    matches the system image.
    """
    self.xml_state.copy_displayname(self.boot_xml_state)
    self.xml_state.copy_name(self.boot_xml_state)
    # the boot image must use the repositories of the system image
    self.xml_state.copy_repository_sections(
        target_state=self.boot_xml_state, wipe=True
    )
    self.xml_state.copy_drivers_sections(self.boot_xml_state)
    strip_description = XMLDescription(Defaults.get_boot_image_strip_file())
    strip_xml_state = XMLState(strip_description.load())
    strip_xml_state.copy_strip_sections(self.boot_xml_state)
    # preferences sub-sections relevant for the boot image
    preferences_subsection_names = [
        'bootloader_theme', 'bootsplash_theme', 'locale',
        'packagemanager', 'rpm_check_signatures', 'showlicense'
    ]
    self.xml_state.copy_preferences_subsections(
        preferences_subsection_names, self.boot_xml_state
    )
    self.xml_state.copy_bootincluded_packages(self.boot_xml_state)
    self.xml_state.copy_bootincluded_archives(self.boot_xml_state)
    self.xml_state.copy_bootdelete_packages(self.boot_xml_state)
    # build type attributes relevant for the boot image
    type_attributes = [
        'bootkernel', 'bootloader', 'bootprofile', 'boottimeout',
        'btrfs_root_is_snapshot', 'devicepersistency', 'filesystem',
        'firmware', 'fsmountoptions', 'hybrid', 'hybridpersistent',
        'hybridpersistent_filesystem', 'installboot',
        'installprovidefailsafe', 'kernelcmdline', 'ramonly',
        'vga', 'wwid_wait_timeout'
    ]
    self.xml_state.copy_build_type_attributes(
        type_attributes, self.boot_xml_state
    )
    self.xml_state.copy_systemdisk_section(self.boot_xml_state)
    self.xml_state.copy_machine_section(self.boot_xml_state)
    self.xml_state.copy_oemconfig_section(self.boot_xml_state)
def test_defaults():
    """Test default conversions."""
    # (input string, expected conversions per configured default unit)
    data = [
        ('1m', [
            C(1, 'meter', 100, 'centimeter', '[length]'),
            C(1, 'meter', 0.001, 'kilometer', '[length]')
        ]),
        ('100g', [
            C(100, 'gram', 0.1, 'kilogram', '[mass]'),
            C(100, 'gram', 100000, 'milligram', '[mass]')
        ]),
    ]
    wf = Workflow3()
    # configure the default target units per dimensionality
    if 'default_units' not in wf.settings:
        wf.settings['default_units'] = {}
    wf.settings['default_units']['[length]'] = ['centimeter', 'kilometer']
    wf.settings['default_units']['[mass]'] = ['kilogram', 'milligram']
    c = convert.Converter(Defaults(wf))
    for t in data:
        i = c.parse(t[0])
        res = c.convert(i)
        # one conversion result per configured default unit
        assert len(res) == len(t[1])
        for j, r in enumerate(res):
            log.debug(r)
            verify_conversion(t[1][j], r)
def __init__(self, directory='.'):
    '''Load all definitions from a directory tree.'''
    self._data = {}
    self.defaults = Defaults()
    # all field names that are valid in a definition
    self.fields = self.defaults.build_steps + self.defaults.fields
    config['cpu'] = self.defaults.cpus.get(config['arch'], config['arch'])
    # walk the tree, skipping hidden directories
    directories = [d[0] for d in os.walk(directory) if '/.' not in d[0]]
    for d in sorted(directories):
        files = glob.glob(os.path.join(d, '*.morph'))
        for path in sorted(files):
            data = self._load(path)
            if data is not None:
                # path[2:] strips a leading './' — assumes the walk
                # started at directory='.'; TODO confirm
                data['path'] = self._demorph(path[2:])
                self._fix_keys(data)
                self._tidy_and_insert_recursively(data)
    # reject definitions containing unknown fields
    for x in self._data:
        dn = self._data[x]
        for field in dn:
            if field not in self.fields:
                log(dn, 'Invalid field "%s" in' % field, dn['path'],
                    exit=True)
def usage(command_usage):
    """
    Print azurectl-specific usage information instead of docopt's
    default output.

    The printed usage consists of:

    * the generic call: azurectl [global options] service <command> [<args>]
    * the command specific usage defined by the docopt string
      (short form by default, long form with -h | --help)
    * the global options section extracted from cli.py

    :param string command_usage: docopt usage string of the command
    """
    with open(Defaults.project_file('cli.py'), 'r') as cli:
        program_code = cli.readlines()
    # collect the 'global options' section from the cli.py docstring
    global_options = '\n'
    process_lines = False
    for line in program_code:
        if line.rstrip().startswith('global options'):
            process_lines = True
        if line.rstrip() == '"""':
            process_lines = False
        if process_lines:
            global_options += format(line)
    # consistency fix: the original mixed Python 2 print statements with
    # a print() call; use the function form throughout (single-argument
    # calls print identically on Python 2 and 3)
    print('usage: azurectl [global options] service <command> [<args>]\n')
    print(format(command_usage).replace('usage:', '       '))
    if 'global options' not in command_usage:
        print(format(global_options))
    if not format(command_usage).startswith('usage:'):
        # command_usage carries an error message instead of usage text
        error_details = format(command_usage).splitlines()[0]
        print(error_details)
def __init__(self, directory='.'):
    '''Load all definitions from a directory tree.'''
    self._data = {}
    self.defaults = Defaults()
    # all field names that are valid in a definition
    self.fields = self.defaults.build_steps + self.defaults.fields
    # map the configured arch to its canonical cpu name
    config['cpu'] = self.defaults.cpus.get(config['arch'], config['arch'])
    self.parse_files(directory)
def __init__(self, config, router_attrs):
    """
    Build host/group/network/user state from the parsed config.

    :param dict config: parsed configuration with 'defaults',
        'hosts', 'groups', 'networks' and 'people' entries
    :param router_attrs: attributes passed through to Network
    """
    self.errors = []
    self.defaults = Defaults(config['defaults'], self.errors)
    # list comprehensions instead of map(): eagerly materialized on
    # both Python 2 and Python 3 (map() is lazy on Python 3)
    self.hosts = [Host(item, self.defaults) for item in config['hosts']]
    self.groups = [Group(item) for item in config['groups']]
    self.networks = [
        Network(item, router_attrs) for item in config['networks']
    ]
    self.users = dict(
        (user.nickname, user)
        for user in [User(item) for item in config['people']]
    )
    self.errors.extend(check_hosts(self.hosts))
    self.errors.extend(expand_groups(self.groups, self.hosts))
    # explicit loop: using map() for side effects is a silent no-op
    # under Python 3
    for host in self.hosts:
        host.clean()
def __systemdisk_to_profile(self):
    """
    Translate the systemdisk (LVM) configuration into dot profile
    variables:

    * kiwi_lvmgroup
    * kiwi_lvm
    * kiwi_LVM_LVRoot
    * kiwi_allFreeVolume_X
    * kiwi_LVM_X
    """
    systemdisk = self.xml_state.get_build_type_system_disk_section()
    if systemdisk:
        self.dot_profile['kiwi_lvmgroup'] = systemdisk.get_name()
        if not self.dot_profile['kiwi_lvmgroup']:
            self.dot_profile['kiwi_lvmgroup'] = \
                Defaults.get_default_volume_group_name()
        if self.xml_state.get_volume_management():
            self.dot_profile['kiwi_lvm'] = 'true'
        for volume in self.xml_state.get_volumes():
            if volume.name == 'LVRoot':
                # root volume: only a fixed size is exported
                if not volume.fullsize:
                    self.dot_profile['kiwi_LVM_LVRoot'] = volume.size
            elif volume.fullsize:
                # volume taking all remaining free space
                if volume.mountpoint:
                    self.dot_profile['kiwi_allFreeVolume_' + volume.name] =\
                        'size:all:' + volume.mountpoint
                else:
                    self.dot_profile['kiwi_allFreeVolume_' + volume.name] =\
                        'size:all'
            else:
                # fixed size volume, optionally with a mountpoint
                if volume.mountpoint:
                    self.dot_profile['kiwi_LVM_' + volume.name] = \
                        volume.size + ':' + volume.mountpoint
                else:
                    self.dot_profile['kiwi_LVM_' + volume.name] = \
                        volume.size
def get_volume_mbsize(self, mbsize, size_type, realpath, filesystem_name):
    """
    Implements size lookup for the given path and desired filesystem
    according to the specified size type

    :param mbsize: configured volume size in MB
    :param string size_type: 'freespace' or an absolute size type
    :param string realpath: volume path below the root tree
    :param string filesystem_name: target filesystem name
    :return: resulting size in MB
    """
    if size_type == 'freespace':
        # Please note for nested volumes which contains other volumes
        # the freespace calculation is not correct. Example:
        # /usr is a volume and /usr/lib is a volume. If freespace is
        # set for the /usr volume the data size calculated also
        # contains the data of the /usr/lib path which will live in
        # an extra volume later. The result will be more freespace
        # than expected ! Especially for the root volume this matters
        # most because it always nests all other volumes. Thus it is
        # better to use a fixed size for the root volume if it is not
        # configured to use all rest space
        #
        # You are invited to fix it :)
        volume_size = SystemSize(
            self.root_dir + '/' + realpath
        )
        # requested freespace plus the global minimum volume size
        mbsize = int(mbsize) + \
            Defaults.get_min_volume_mbytes()
        # plus the filesystem adjusted size of the current data
        mbsize += volume_size.customize(
            volume_size.accumulate_mbyte_file_sizes(),
            filesystem_name
        )
    return mbsize
def __create_live_iso_client_config(self, iso_type):
    """
    Setup IMAGE and UNIONFS_CONFIG variables as they are used in
    the kiwi isoboot code. Variable contents:

    + IMAGE=target_device;live_iso_name_definition
    + UNIONFS_CONFIG=rw_device,ro_device,union_type

    If no real block device is used or can be predefined the word
    'loop' is set as a placeholder or indicator to use a loop device.
    For more details please refer to the kiwi shell boot code
    """
    iso_client_config_file = self.media_dir + '/config.isoclient'
    iso_client_params = Defaults.get_live_iso_client_parameters()
    (system_device, union_device, union_type) = iso_client_params[iso_type]
    with open(iso_client_config_file, 'w') as config:
        config.write(
            'IMAGE="%s;%s.%s;%s"\n' % (
                system_device,
                self.xml_state.xml_data.get_name(),
                self.arch,
                self.xml_state.get_image_version()
            )
        )
        config.write(
            'UNIONFS_CONFIG="%s,loop,%s"\n' % (union_device, union_type)
        )
def prepare(self):
    """
    Prepare a new root system suitable to create an initrd from it.

    Loads the boot image XML description, merges the relevant
    elements of the system description into it, then installs and
    configures the boot root tree.
    """
    self.__load_boot_xml_description()
    boot_image_name = self.boot_xml_state.xml_data.get_name()
    self.__import_system_description_elements()
    log.info('Preparing boot image')
    system = System(
        xml_state=self.boot_xml_state,
        root_dir=self.boot_root_directory,
        allow_existing=True
    )
    manager = system.setup_repositories()
    system.install_bootstrap(
        manager
    )
    system.install_system(
        manager
    )
    # export the initrd name so the boot code can find its artifacts
    profile = Profile(self.boot_xml_state)
    profile.add('kiwi_initrdname', boot_image_name)
    defaults = Defaults()
    defaults.to_profile(profile)
    setup = SystemSetup(
        self.boot_xml_state,
        self.__boot_description_directory(),
        self.boot_root_directory
    )
    setup.import_shell_environment(profile)
    setup.import_description()
    setup.import_overlay_files(
        follow_links=True
    )
    setup.call_config_script()
    # NOTE(review): forced pinch presumably strips the packages marked
    # for deletion before the image script runs — confirm
    system.pinch_system(
        manager=manager, force=True
    )
    setup.call_image_script()
    setup.create_init_link_from_linuxrc()
def __boot_description_directory(self):
    """
    Return the path to the boot image description directory.

    A relative 'boot' attribute value is resolved against the default
    boot image description path; an unset attribute yields None.
    """
    location = self.xml_state.build_type.get_boot()
    if not location:
        return None
    if location[0] != '/':
        location = '{0}/{1}'.format(
            Defaults.get_boot_image_description_path(), location
        )
    return location
def __setup_secure_boot_efi_image(self, lookup_path):
    """
    use prebuilt and signed efi images provided by the distribution

    :param string lookup_path: alternative lookup directory, defaults
        to <root_dir>/usr/lib64/efi/
    :raises KiwiBootLoaderGrubSecureBootError:
        if the shim or the signed grub image is missing
    """
    secure_efi_lookup_path = self.root_dir + '/usr/lib64/efi/'
    if lookup_path:
        secure_efi_lookup_path = lookup_path
    shim_image = secure_efi_lookup_path + Defaults.get_shim_name()
    if not os.path.exists(shim_image):
        raise KiwiBootLoaderGrubSecureBootError(
            'Microsoft signed shim loader %s not found' % shim_image)
    grub_image = secure_efi_lookup_path + Defaults.get_signed_grub_name()
    if not os.path.exists(grub_image):
        raise KiwiBootLoaderGrubSecureBootError(
            'Signed grub2 efi loader %s not found' % grub_image)
    # shim is installed under the primary efi image name; the signed
    # grub loader is placed next to it in the efi boot path
    Command.run(['cp', shim_image, self.__get_efi_image_name()])
    Command.run(['cp', grub_image, self.efi_boot_path])
def __init__(self, xml_state, root_dir):
    """
    Collect size relevant information from the image description.

    :param object xml_state: image description state
    :param string root_dir: prepared root tree directory
    """
    image_type = xml_state.get_build_type_name()
    self.configured_size = xml_state.get_build_type_size()
    self.size = SystemSize(root_dir)
    self.requested_image_type = image_type
    # a filesystem image type directly names the filesystem;
    # otherwise it comes from the build type's filesystem attribute
    self.requested_filesystem = (
        image_type
        if image_type in Defaults.get_filesystem_image_types()
        else xml_state.build_type.get_filesystem()
    )
def __xsltproc(self):
    """
    Pre-process the description with xsltproc using the KIWI
    XSL stylesheet, writing the result to the processed file.
    """
    xslt_call = [
        'xsltproc',
        '-o', self.description_xslt_processed.name,
        Defaults.get_xsl_stylesheet_file(),
        self.description
    ]
    Command.run(xslt_call)
def __accumulate_volume_size(self, root_mbytes):
    """
    Calculate the number of mbytes to add to the disk to allow the
    creation of the configured volumes with their requested sizes.

    :param root_mbytes: used size of the root filesystem in MB
    :return: additional disk size in MB
    """
    disk_volume_mbytes = 0
    data_volume_mbytes = self.__calculate_volume_mbytes()
    root_volume = self.__get_root_volume_configuration()
    for volume in self.volumes:
        if volume.realpath and not volume.realpath == '/' and volume.size:
            [size_type, req_size] = volume.size.split(':')
            disk_add_mbytes = 0
            if size_type == 'freespace':
                # requested free space on top of the data plus the
                # global minimum volume size
                disk_add_mbytes += int(req_size) + \
                    Defaults.get_min_volume_mbytes()
            else:
                # absolute size: only the part exceeding the data
                # already present needs to be added
                disk_add_mbytes += int(req_size) - \
                    data_volume_mbytes.volume[volume.realpath]
            if disk_add_mbytes > 0:
                disk_volume_mbytes += disk_add_mbytes
            else:
                log.warning(
                    'volume size of %s MB for %s is too small, skipped',
                    int(req_size), volume.realpath
                )
    if root_volume:
        # bug fix: use a fresh assignment here. The original used '+='
        # which read the stale loop variable from the last iteration
        # (and raised UnboundLocalError when there were no volumes)
        if root_volume.size_type == 'freespace':
            disk_add_mbytes = root_volume.req_size + \
                Defaults.get_min_volume_mbytes()
        else:
            disk_add_mbytes = root_volume.req_size - \
                root_mbytes + data_volume_mbytes.total
        if disk_add_mbytes > 0:
            disk_volume_mbytes += disk_add_mbytes
        else:
            log.warning(
                'root volume size of %s MB is too small, skipped',
                root_volume.req_size
            )
    return disk_volume_mbytes
def get_target_name_for_format(self, format_name):
    """
    Return the target file name for the given disk format.

    :param string format_name: disk format name, 'raw' is always valid
    :raises KiwiFormatSetupError: for an unsupported disk format
    """
    if format_name != 'raw' and \
            format_name not in Defaults.get_disk_format_types():
        raise KiwiFormatSetupError(
            'unsupported disk format %s' % format_name
        )
    return '{0}/{1}.{2}'.format(
        self.target_dir, self.xml_state.xml_data.get_name(), format_name
    )
class State:
    """
    Aggregated view over hosts, groups, networks and users parsed
    from the configuration. Collects all validation errors in
    self.errors.
    """

    def __init__(self, config, router_attrs):
        self.errors = []
        self.defaults = Defaults(config['defaults'], self.errors)
        # list comprehensions instead of map(): eager on Python 2
        # and 3 alike (map() is lazy on Python 3)
        self.hosts = [Host(item, self.defaults) for item in config['hosts']]
        self.groups = [Group(item) for item in config['groups']]
        self.networks = [
            Network(item, router_attrs) for item in config['networks']
        ]
        self.users = dict(
            (user.nickname, user)
            for user in [User(item) for item in config['people']]
        )
        self.errors.extend(check_hosts(self.hosts))
        self.errors.extend(expand_groups(self.groups, self.hosts))
        # explicit loop: map() for side effects is a no-op on Python 3
        for host in self.hosts:
            host.clean()

    def choose_net(self, net_name):
        return network.choose_net(self.networks, net_name)

    def get_canonical_hostname(self, host):
        return self.defaults.get_canonical_hostname(host.name)

    def get_nagios(self, addr):
        # a network specific nagios address wins over the default
        rv = self.defaults.nagios
        if addr:
            chosen = network.get_nagios(self.networks, addr)
            if chosen:
                rv = chosen
        return self.defaults.expand_ip(rv)

    def belongs_to(self, host):
        return belongs_to(self.networks, host)

    def find(self, hostname):
        # return the unique host matching any name or alias, else None
        # (list comprehension: filter() is not subscriptable on py3)
        candidates = [
            host for host in self.hosts
            if hostname in
            [host.name, host.sname] + host.aliases + host.saliases
        ]
        return candidates[0] if len(candidates) == 1 else None

    def is_gray(self, host):
        # PEP 8: identity comparison against None
        return host.addr is not None and host.addr.startswith(
            self.defaults.network_prefix)
def __init__(self, xml_state, target_dir, root_dir):
    """
    Collect all data needed to build a live ISO image.

    :param object xml_state: image description state
    :param string target_dir: output directory for image results
    :param string root_dir: prepared root tree directory
    """
    self.media_dir = None
    self.arch = platform.machine()
    self.root_dir = root_dir
    self.target_dir = target_dir
    self.xml_state = xml_state
    self.live_type = xml_state.build_type.get_flags()
    self.types = Defaults.get_live_iso_types()
    self.hybrid = xml_state.build_type.get_hybrid()
    self.volume_id = xml_state.build_type.get_volid()
    self.machine = xml_state.get_build_type_machine_section()
    # identifier written to the ISO master boot record
    self.mbrid = ImageIdentifier()
    self.mbrid.calculate_id()
    if not self.live_type:
        self.live_type = Defaults.get_default_live_iso_type()
    self.boot_image_task = BootImageTask(
        'kiwi', xml_state, target_dir
    )
    self.firmware = FirmWare(
        xml_state
    )
    self.system_setup = SystemSetup(
        xml_state=xml_state,
        description_dir=None,
        root_dir=self.root_dir
    )
    # <target_dir>/<name>.<arch>-<version>.iso
    self.isoname = ''.join(
        [
            target_dir, '/',
            xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(),
            '.iso'
        ]
    )
    # <target_dir>/<name>-read-only.<arch>-<version>
    self.live_image_file = ''.join(
        [
            target_dir, '/', xml_state.xml_data.get_name(),
            '-read-only.', self.arch, '-', xml_state.get_image_version()
        ]
    )
    self.result = Result()
class State:
    """
    Aggregated host/group/network/user state built from the parsed
    configuration; validation problems accumulate in self.errors.
    """

    def __init__(self, config, router_attrs):
        self.errors = []
        self.defaults = Defaults(config['defaults'], self.errors)
        # eager list comprehensions: map() is a lazy iterator on
        # Python 3, so the original lists would never materialize
        self.hosts = [Host(item, self.defaults) for item in config['hosts']]
        self.groups = [Group(item) for item in config['groups']]
        self.networks = [
            Network(item, router_attrs) for item in config['networks']
        ]
        self.users = dict(
            (user.nickname, user)
            for user in [User(item) for item in config['people']]
        )
        self.errors.extend(check_hosts(self.hosts))
        self.errors.extend(expand_groups(self.groups, self.hosts))
        # side effects need an explicit loop, not map()
        for host in self.hosts:
            host.clean()

    def choose_net(self, net_name):
        return network.choose_net(self.networks, net_name)

    def get_canonical_hostname(self, host):
        return self.defaults.get_canonical_hostname(host.name)

    def get_nagios(self, addr):
        # prefer a network specific nagios address over the default
        rv = self.defaults.nagios
        if addr:
            chosen = network.get_nagios(self.networks, addr)
            if chosen:
                rv = chosen
        return self.defaults.expand_ip(rv)

    def belongs_to(self, host):
        return belongs_to(self.networks, host)

    def find(self, hostname):
        # list comprehension instead of filter(): filter() objects
        # are not subscriptable on Python 3
        candidates = [
            host for host in self.hosts
            if hostname in
            [host.name, host.sname] + host.aliases + host.saliases
        ]
        return candidates[0] if len(candidates) == 1 else None

    def is_gray(self, host):
        # PEP 8: identity comparison against None
        return host.addr is not None and host.addr.startswith(
            self.defaults.network_prefix)
def __accumulate_volume_size(self, root_mbytes):
    """
    Calculate the number of mbytes to add to the disk to allow the
    creation of the configured volumes with their requested sizes.

    :param root_mbytes: used size of the root filesystem in MB
    :return: additional disk size in MB
    """
    disk_volume_mbytes = 0
    data_volume_mbytes = self.__calculate_volume_mbytes()
    root_volume = self.__get_root_volume_configuration()
    for volume in self.volumes:
        if volume.realpath and not volume.realpath == '/' and volume.size:
            [size_type, req_size] = volume.size.split(':')
            disk_add_mbytes = 0
            if size_type == 'freespace':
                # requested free space plus the global minimum size
                disk_add_mbytes += int(req_size) + \
                    Defaults.get_min_volume_mbytes()
            else:
                # absolute size: add only what exceeds present data
                disk_add_mbytes += int(req_size) - \
                    data_volume_mbytes.volume[volume.realpath]
            if disk_add_mbytes > 0:
                disk_volume_mbytes += disk_add_mbytes
            else:
                log.warning(
                    'volume size of %s MB for %s is too small, skipped',
                    int(req_size), volume.realpath)
    if root_volume:
        # bug fix: assign instead of '+=' — the original accumulated
        # onto the stale loop variable (UnboundLocalError with an
        # empty volumes list)
        if root_volume.size_type == 'freespace':
            disk_add_mbytes = root_volume.req_size + \
                Defaults.get_min_volume_mbytes()
        else:
            disk_add_mbytes = root_volume.req_size - \
                root_mbytes + data_volume_mbytes.total
        if disk_add_mbytes > 0:
            disk_volume_mbytes += disk_add_mbytes
        else:
            log.warning('root volume size of %s MB is too small, skipped',
                        root_volume.req_size)
    return disk_volume_mbytes
def get_volume_group_name(self):
    """
    Return the volume group name from the systemdisk section,
    falling back to the default volume group name when unset.
    """
    systemdisk = self.get_build_type_system_disk_section()
    group_name = systemdisk.get_name() if systemdisk else None
    return group_name if group_name else \
        Defaults.get_default_volume_group_name()
def create(self):
    """
    Create the requested filesystem image and register it on the
    result instance.

    :raises KiwiFileSystemSetupError: for an unknown filesystem
    :return: the Result instance
    """
    log.info('Creating %s filesystem', self.requested_filesystem)
    if self.requested_filesystem not in \
            Defaults.get_filesystem_image_types():
        raise KiwiFileSystemSetupError(
            'Unknown filesystem: %s' % self.requested_filesystem
        )
    # filesystems without a device node are created on a plain file,
    # everything else goes through a loop device
    if self.requested_filesystem in self.filesystems_no_device_node:
        self.__operate_on_file()
    else:
        self.__operate_on_loop()
    self.result.add('filesystem_image', self.filename)
    return self.result
def get_target_name_for_format(self, format_name):
    """
    Return the target file name for the given disk format.

    :param string format_name: disk format name, 'raw' is always valid
    :raises KiwiFormatSetupError: for an unsupported disk format
    """
    if format_name != 'raw' and \
            format_name not in Defaults.get_disk_format_types():
        raise KiwiFormatSetupError(
            'unsupported disk format %s' % format_name
        )
    name_parts = [
        self.target_dir, '/',
        self.xml_state.xml_data.get_name(),
        '.' + format_name
    ]
    return ''.join(name_parts)
def __new__(self, xml_state, target_dir, root_dir):
    """
    Factory: return the builder instance matching the configured
    build type.

    :raises KiwiRequestedTypeError: for an unsupported image type
    """
    requested_image_type = xml_state.get_build_type_name()
    if requested_image_type in Defaults.get_filesystem_image_types():
        return FileSystemBuilder(
            xml_state, target_dir, root_dir
        )
    elif requested_image_type in Defaults.get_disk_image_types():
        return DiskBuilder(
            xml_state, target_dir, root_dir
        )
    elif requested_image_type in Defaults.get_live_image_types():
        return LiveImageBuilder(
            xml_state, target_dir, root_dir
        )
    elif requested_image_type in Defaults.get_network_image_types():
        return PxeBuilder(
            xml_state, target_dir, root_dir
        )
    elif requested_image_type in Defaults.get_archive_image_types():
        return ArchiveBuilder(
            xml_state, target_dir, root_dir
        )
    elif requested_image_type in Defaults.get_container_image_types():
        return ContainerBuilder(
            xml_state, target_dir, root_dir
        )
    else:
        raise KiwiRequestedTypeError(
            'requested image type %s not supported' % requested_image_type
        )
def __init__(self, xml_state):
    """
    Evaluate the firmware setup from the image description.

    :param object xml_state: image description state
    :raises KiwiNotImplementedError: if the configured firmware is
        not supported for the host architecture
    """
    self.host_architecture = platform.machine()
    self.firmware = xml_state.build_type.get_firmware()
    self.zipl_target_type = xml_state.build_type.get_zipl_targettype()
    self.efi_capable_firmware_names = ['efi', 'uefi', 'vboot']
    self.ec2_firmware_names = ['ec2', 'ec2hvm']
    firmware_types = Defaults.get_firmware_types()
    if self.firmware:
        # NOTE(review): raises KeyError when the host architecture has
        # no entry in firmware_types — confirm whether intended
        if self.firmware not in firmware_types[self.host_architecture]:
            raise KiwiNotImplementedError(
                'support for firmware %s for arch %s not implemented' %
                (self.firmware, self.host_architecture))
def memoize(keyformat, time=60):
    """Decorator to memoize functions using memcache.

    keyformat: a %-format string filled from the leading positional
    arguments to produce the cache key.
    time: cache TTL in seconds.

    On the development server caching is disabled and functions are
    returned unchanged.
    """
    def decorator(fxn):
        def wrapper(*args, **kwargs):
            key = keyformat % args[0:keyformat.count('%')]
            data = memcache.get(key)
            if data is not None:
                return data
            data = fxn(*args, **kwargs)
            memcache.set(key, data, time)
            return data
        return wrapper
    # bug fix: the original returned the bare name 'fxn', which is
    # undefined at this scope and raised NameError on the dev server;
    # return an identity decorator instead
    return decorator if not Defaults.isDevelopmentServer() \
        else (lambda fxn: fxn)
def __setup_secure_boot_efi_image(self, lookup_path):
    """
    use prebuilt and signed efi images provided by the distribution

    :param string lookup_path: alternative lookup directory, defaults
        to <root_dir>/usr/lib64/efi/
    :raises KiwiBootLoaderGrubSecureBootError:
        if the shim or the signed grub image is missing
    """
    secure_efi_lookup_path = self.root_dir + '/usr/lib64/efi/'
    if lookup_path:
        secure_efi_lookup_path = lookup_path
    shim_image = secure_efi_lookup_path + Defaults.get_shim_name()
    if not os.path.exists(shim_image):
        raise KiwiBootLoaderGrubSecureBootError(
            'Microsoft signed shim loader %s not found' % shim_image
        )
    grub_image = secure_efi_lookup_path + Defaults.get_signed_grub_name()
    if not os.path.exists(grub_image):
        raise KiwiBootLoaderGrubSecureBootError(
            'Signed grub2 efi loader %s not found' % grub_image
        )
    # shim is installed under the primary efi image name; the signed
    # grub loader is placed next to it in the efi boot path
    Command.run(
        ['cp', shim_image, self.__get_efi_image_name()]
    )
    Command.run(
        ['cp', grub_image, self.efi_boot_path]
    )
def __init__(self, config, t, **kwargs):
    """
    Load a RUC data file for the given time.

    config : configuration settings
    t : time, datenum format

    optional key-word arguments:
    wrfdir : if picked, domain is cut down
    """
    self.C = config
    self.D = Defaults()
    self.path_to_data = self.C.path_to_RUC
    self.output_root = self.C.output_root
    self.t = t
    # Convert the datenum into time sequence
    self.ts = self.get_time_seq()
    self.version = self.get_version()
    self.fname = self.get_fname()
    self.fpath = os.path.join(self.path_to_data, self.fname + '.nc')
    self.nc = Dataset(self.fpath)
    # Original lat and lon grids
    self.lats, self.lons = self.get_latlon()
    # Original lat/lon 1D arrays taken through the grid centre
    # NOTE(review): '/' on shape values relies on Python 2 integer
    # division; under Python 3 this would need '//'
    self.lats1D = self.lats[:, self.lats.shape[1] / 2]
    self.lons1D = self.lons[self.lons.shape[0] / 2, :]
    if 'wrfdir' in kwargs:
        # It means all data should be cut to this size
        self.limits = self.colocate_WRF_map(kwargs['wrfdir'])
        self.lats2D = self.cut_2D_array(self.lats)
        self.lons2D = self.cut_2D_array(self.lons)
        self.lats, self.lons = self.cut_lat_lon()
        self.y_dim = len(self.lats)
        self.x_dim = len(self.lons)
        # after cutting, lats/lons are already one dimensional
        self.lats1D = self.lats
        self.lons1D = self.lons
    else:
        # Leave the dimensions the way they were
        self.y_dim = self.lats.shape[0]
        self.x_dim = self.lats.shape[1]
    print('RUC file loaded from {0}'.format(self.fpath))
def __init__(self,config): self.C = config # Set defaults if they don't appear in user's settings self.D = Defaults() self.font_prop = getattr(self.C,'font_prop',self.D.font_prop) self.usetex = getattr(self.C,'usetex',self.D.usetex) self.dpi = getattr(self.C,'dpi',self.D.dpi) self.plot_titles = getattr(self.C,'plot_titles',self.D.plot_titles) # Set some general settings M.rc('text',usetex=self.usetex) M.rc('font',**self.font_prop) M.rcParams['savefig.dpi'] = self.dpi
def __init__(self, fpath, config):
    """
    Open an ECMWF netCDF file and expose its coordinate arrays
    and dimensions.
    """
    self.C = config
    self.D = Defaults()
    self.ec = Dataset(fpath, 'r')
    self.times = self.ecmwf_times()
    # self.dx =
    # self.dy =
    self.lats = self.ec.variables['g0_lat_2'][:]  # N to S
    self.lons = self.ec.variables['g0_lon_3'][:]  # W to E
    self.lvs = self.ec.variables['lv_ISBL1'][:]  # jet to sfc
    self.fields = self.ec.variables.keys()
    # presumably (time, level, lat, lon) ordering — confirm against
    # the file layout
    self.dims = self.ec.variables['Z_GDS0_ISBL'].shape
    self.x_dim = self.dims[3]
    self.y_dim = self.dims[2]
    self.z_dim = self.dims[1]
def __get_command_implementations(self, service):
    """
    Scan the project's *task.py files for commands implemented for
    the given service and return their command paths.
    """
    command_implementations = []
    glob_match = Defaults.project_file('.') + '/*task.py'
    for source_file in glob.iglob(glob_match):
        with open(source_file, 'r') as source:
            for line in source:
                # a usage line marks the file as a command implementation
                if re.search('usage: (.*)', line):
                    command_path = os.path.basename(source_file).split('_')
                    if command_path[0] == service:
                        # drop the trailing 'task.py' element
                        command_path.pop()
                        command_implementations.append(
                            ' '.join(command_path)
                        )
                    # only the first usage line per file matters
                    break
    return command_implementations
def customize(self, size, requested_filesystem):
    """
    Increase the sum of all file sizes by an empiric,
    filesystem dependent factor.
    """
    if requested_filesystem.startswith('ext'):
        size = size * 1.5
        # reserve inode metadata: twice the default inode size per
        # file, converted from bytes to MB
        inode_mbytes = \
            self.accumulate_files() * \
            Defaults.get_default_inode_size() / 1048576
        size = size + 2 * inode_mbytes
    elif requested_filesystem == 'btrfs':
        size = size * 1.5
    elif requested_filesystem == 'xfs':
        size = size * 1.2
    return int(size)
def __init__(self, config, router_attrs):
    """
    Build host/group/network/user state from the parsed config.

    :param dict config: parsed configuration with 'defaults',
        'hosts', 'groups', 'networks' and 'people' entries
    :param router_attrs: attributes passed through to Network
    """
    self.errors = []
    self.defaults = Defaults(config['defaults'], self.errors)
    # eager list comprehensions instead of map(): map() is a lazy
    # iterator on Python 3, so the lists would never materialize
    self.hosts = [Host(item, self.defaults) for item in config['hosts']]
    self.groups = [Group(item) for item in config['groups']]
    self.networks = [
        Network(item, router_attrs) for item in config['networks']
    ]
    self.users = dict(
        (user.nickname, user)
        for user in [User(item) for item in config['people']]
    )
    self.errors.extend(check_hosts(self.hosts))
    self.errors.extend(expand_groups(self.groups, self.hosts))
    # side effects need an explicit loop, not map()
    for host in self.hosts:
        host.clean()
def create(self):
    """
    Create the requested root archive and register it on the
    result instance.

    :raises KiwiArchiveSetupError: for an unknown archive type
    :return: the Result instance
    """
    if self.requested_archive_type not in \
            Defaults.get_archive_image_types():
        raise KiwiArchiveSetupError(
            'Unknown archive type: %s' % self.requested_archive_type
        )
    if self.requested_archive_type == 'tbz':
        # despite the 'tbz' name an xz compressed tarball is created
        # (create_xz_compressed) — behavior kept for compatibility
        log.info('Creating XZ compressed tar archive')
        root_archive = ArchiveTar(self.__target_file_for('tar'))
        root_archive.create_xz_compressed(self.root_dir)
        archive_checksum = Checksum(self.filename)
        log.info('--> Creating archive checksum')
        archive_checksum.md5(self.checksum)
        self.result.add('root_archive', self.filename)
        self.result.add('root_archive_checksum', self.checksum)
    return self.result
def load(self):
    """
    Validate the XSLT processed description against the RelaxNG
    schema and return the parse result.

    :raises KiwiSchemaImportError: if the schema can not be loaded
    :raises KiwiValidationError: if the validation process fails
    :raises KiwiDescriptionInvalid: if the description is invalid
    """
    self.__xsltproc()
    try:
        relaxng = etree.RelaxNG(etree.parse(Defaults.get_schema_file()))
    except Exception as e:
        raise KiwiSchemaImportError('%s: %s' % (type(e).__name__, format(e)))
    try:
        description = etree.parse(self.description_xslt_processed.name)
        validation_ok = relaxng.validate(description)
    except Exception as e:
        raise KiwiValidationError('%s: %s' % (type(e).__name__, format(e)))
    if not validation_ok:
        raise KiwiDescriptionInvalid(
            'Schema validation for %s failed' % self.description
        )
    return self.__parse()
def create(self):
    """
    Create the requested filesystem image and register it on the
    result instance.

    :raises KiwiFileSystemSetupError: for an unknown filesystem
    :return: the Result instance
    """
    log.info('Creating %s filesystem', self.requested_filesystem)
    known_types = Defaults.get_filesystem_image_types()
    if self.requested_filesystem not in known_types:
        raise KiwiFileSystemSetupError(
            'Unknown filesystem: %s' % self.requested_filesystem
        )
    # filesystems with a device node are built through a loop device,
    # the rest directly on a plain file
    needs_loop = \
        self.requested_filesystem not in self.filesystems_no_device_node
    if needs_loop:
        self.__operate_on_loop()
    else:
        self.__operate_on_file()
    self.result.add('filesystem_image', self.filename)
    return self.result
def __init__(self, config, wrfout, data=0, fig=0, ax=0):
    """
    Interactive clicker on top of a basemap plot.

    When an existing matplotlib figure is passed in it is reused,
    otherwise the parent class creates one.
    """
    # import pdb; pdb.set_trace()
    self.C = config
    self.D = Defaults()
    self.W = wrfout
    if isinstance(fig, M.figure.Figure):
        self.fig = fig
        self.ax = ax
    else:
        super(Clicker, self).__init__(config, wrfout, fig=fig, ax=ax)
    self.bmap, self.x, self.y = self.basemap_setup()
    if isinstance(data, N.ndarray):
        # Lazily assuming it's reflectivity
        S = Scales('cref', 2000)
        self.overlay_data(data, V=S.clvs, cmap=S.cm)
def __init__(self, directory='.'):
    '''Load all definitions from a directory tree.

    Parses every definition file below *directory*, validates the
    tree references and, where a definition's 'ref' matches the
    recorded .trees entry, attaches the resolved tree to it.
    '''
    self._data = {}
    self._trees = {}
    self.defaults = Defaults()
    # map the configured architecture to its canonical cpu name,
    # falling back to the architecture string itself
    config['cpu'] = self.defaults.cpus.get(config['arch'], config['arch'])
    self.parse_files(directory)
    self._check_trees()
    for path, this in self._data.items():
        # look up the .trees entry once instead of three times
        tree_entry = self._trees.get(path)
        try:
            if this.get('ref') and tree_entry:
                if this['ref'] == tree_entry[0]:
                    this['tree'] = tree_entry[1]
        except Exception:
            # was a bare 'except:' which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort warning behaviour
            # but only for real errors
            log('DEFINITIONS', 'WARNING: problem with .trees file')
def memoize2(time=120):
    """Decorator factory to memoize functions using memcache.

    time -- memcache expiry in seconds for stored results.

    Bug fix: the original ended with
    ``return decorator if not Defaults.isDevelopmentServer() else fxn``
    but ``fxn`` is only bound inside ``decorator``, so the development
    server branch raised NameError. The dev-server bypass now lives
    inside ``decorator`` and returns the function unwrapped.
    """
    import functools  # local import: file-level import block not in view

    def decorator(fxn):
        if Defaults.isDevelopmentServer():
            # no memcache on the development server: leave fxn unwrapped
            return fxn

        @functools.wraps(fxn)
        def wrapper(*args, **kwargs):
            # key the cache on the md5 of the stringified arguments
            m = hashlib.md5()
            m.update(str(args))
            m.update(str(kwargs))
            key = m.hexdigest()
            data = memcache.get(key)
            if data is not None:
                logging.debug('key: "%s", hit' % key)
                return data
            logging.debug('key: "%s", miss' % key)
            data = fxn(*args, **kwargs)
            memcache.set(key, data, time)
            return data
        return wrapper
    return decorator
def __init__(self, config, wrfout, ax=0, fig=0, plotn=(1, 1), layout='normal'):
    """
    Set up a matplotlib figure for plotting WRF output.

    config -- configuration settings; read for output_root and an
              optional DPI override (stored as self.C)
    wrfout -- WRF output data handle (stored as self.W)
    ax, fig -- existing axes/figure to reuse; both must be truthy
               to take effect, otherwise new ones are created
    plotn -- (nrows, ncols) subplot grid for the default layout
    layout -- 'insetv' (1x2, narrow left panel), 'inseth'
              (2x1, short top panel) or anything else for a plain
              subplot grid
    """
    self.C = config
    self.W = wrfout
    self.D = Defaults()
    self.output_fpath = self.C.output_root
    # Figure resolution: per-run DPI on the config wins, else the
    # package default
    dpi = getattr(self.C, 'DPI', self.D.dpi)
    # Create main figure
    if ax and fig:
        # caller supplied both handles: draw into them as-is
        self.ax = ax
        self.fig = fig
    elif layout == 'insetv':
        # vertical inset: narrow panel (ax0) beside the main one (ax1)
        self.fig = plt.figure(figsize=(8, 6))
        self.gs = M.gridspec.GridSpec(1, 2, width_ratios=[1, 3])
        self.ax0 = plt.subplot(self.gs[0])
        self.ax1 = plt.subplot(self.gs[1])
    elif layout == 'inseth':
        # horizontal inset: short panel (ax0) above the main one (ax1)
        self.fig = plt.figure(figsize=(6, 8))
        self.gs = M.gridspec.GridSpec(2, 1, height_ratios=[1, 3])
        self.ax0 = plt.subplot(self.gs[0])
        self.ax1 = plt.subplot(self.gs[1])
    else:
        # default: regular nrows x ncols subplot grid
        self.fig, self.ax = plt.subplots(nrows=plotn[0], ncols=plotn[1])
    self.fig.set_dpi(dpi)
def create(self):
    """
    Create the configured root archive and record it in the result.

    An unsupported archive type raises KiwiArchiveSetupError; the
    only implemented type is 'tbz' (xz compressed tar).
    """
    supported = Defaults.get_archive_image_types()
    if self.requested_archive_type not in supported:
        raise KiwiArchiveSetupError(
            'Unknown archive type: %s' % self.requested_archive_type
        )
    if self.requested_archive_type == 'tbz':
        log.info('Creating XZ compressed tar archive')
        ArchiveTar(
            self.__target_file_for('tar')
        ).create_xz_compressed(self.root_dir)
        log.info('--> Creating archive checksum')
        Checksum(self.filename).md5(self.checksum)
        self.result.add('root_archive', self.filename)
        self.result.add('root_archive_checksum', self.checksum)
    return self.result
def load_command(self):
    """
    Import and return the task module implementing the requested
    service command.

    The 'compat' service is delegated to kiwicompat with the legacy
    arguments. A missing command raises KiwiLoadCommandUndefined; an
    unknown command prints the available implementations for the
    service and exits.
    """
    command = self.get_command()
    service = self.get_servicename()
    if service == 'compat':
        # legacy invocation path
        return self.invoke_kiwicompat(self.all_args['<legacy_args>'][1:])
    if not command:
        raise KiwiLoadCommandUndefined(
            'No command specified for %s service' % service
        )
    module_basename = service + '_' + command + '_task'
    command_source_file = Defaults.project_file(module_basename + '.py')
    if not os.path.exists(command_source_file):
        from logger import log
        log.info('Did you mean')
        for service_command in self.__get_command_implementations(service):
            log.info('--> kiwi %s', service_command)
        raise SystemExit
    self.command_loaded = importlib.import_module('kiwi.' + module_basename)
    return self.command_loaded
def load(self):
    """
    Run RELAX NG schema validation on the xslt processed
    description and return the parsed result.

    Schema load problems raise KiwiSchemaImportError, validator
    failures raise KiwiValidationError, and an invalid description
    raises KiwiDescriptionInvalid.
    """
    self.__xsltproc()
    try:
        relaxng = etree.RelaxNG(etree.parse(Defaults.get_schema_file()))
    except Exception as error:
        raise KiwiSchemaImportError(
            '%s: %s' % (type(error).__name__, format(error))
        )
    try:
        parsed = etree.parse(self.description_xslt_processed.name)
        valid = relaxng.validate(parsed)
    except Exception as error:
        raise KiwiValidationError(
            '%s: %s' % (type(error).__name__, format(error))
        )
    if not valid:
        raise KiwiDescriptionInvalid(
            'Schema validation for %s failed' % self.description
        )
    return self.__parse()
def import_description(self):
    """
    import XML descriptions, custom scripts and script helper methods

    Writes the current XML state as image/config.xml into the root
    tree, copies the optional bootloader edit scripts, config.sh and
    images.sh, and installs the common script helper functions as
    .kconfig when any script was imported.

    Raises KiwiScriptFailed if a configured bootloader script does
    not exist in the description directory.
    """
    log.info('Importing Image description to system tree')
    description = self.root_dir + '/image/config.xml'
    log.info('--> Importing state XML description as image/config.xml')
    Path.create(self.root_dir + '/image')
    with open(description, 'w') as config:
        config.write('<?xml version="1.0" encoding="utf-8"?>')
        self.xml_state.xml_data.export(outfile=config, level=0)

    need_script_helper_functions = False
    config_script = self.description_dir + '/config.sh'
    image_script = self.description_dir + '/images.sh'

    bootloader_scripts = {
        'edit_boot_config.sh':
            self.xml_state.build_type.get_editbootconfig(),
        'edit_boot_install.sh':
            self.xml_state.build_type.get_editbootinstall()
    }
    # sorted for a deterministic import order
    sorted_bootloader_scripts = OrderedDict(
        sorted(bootloader_scripts.items())
    )
    script_target = self.root_dir + '/image/'
    # bugfix: iteritems() exists only on Python 2 dicts; items() is
    # equivalent there and also works on Python 3
    for name, bootloader_script in sorted_bootloader_scripts.items():
        if bootloader_script:
            script_file = self.description_dir + '/' + bootloader_script
            if os.path.exists(script_file):
                log.info(
                    '--> Importing %s script as %s',
                    bootloader_script, 'image/' + name
                )
                Command.run(
                    [
                        'cp', script_file, script_target + name
                    ]
                )
            else:
                raise KiwiScriptFailed(
                    'Specified script %s does not exist' % script_file
                )

    if os.path.exists(config_script):
        log.info('--> Importing config script as image/config.sh')
        Command.run(['cp', config_script, script_target])
        need_script_helper_functions = True

    if os.path.exists(image_script):
        log.info('--> Importing image script as image/images.sh')
        Command.run(['cp', image_script, script_target])
        need_script_helper_functions = True

    if need_script_helper_functions:
        # any imported script may source the shared helper functions
        script_functions = Defaults.get_common_functions_file()
        script_functions_target = self.root_dir + '/.kconfig'
        log.info('--> Importing script helper functions')
        Command.run([
            'cp', script_functions, script_functions_target
        ])
def process(self):
    """
    Build a system image in one go: prepare a new root tree below
    <target-dir>/build/image-root, run the configured setup steps
    and create the image with the matching ImageBuilder.

    Reads --target-dir, --description, --set-repo, --add-repo and
    --obs-repo-internal from the parsed command arguments.
    """
    self.manual = Help()
    if self.__help():
        return
    Privileges.check_for_root_permissions()
    image_root = self.command_args['--target-dir'] + '/build/image-root'
    Path.create(image_root)
    if not self.global_args['--logfile']:
        # no explicit logfile requested: log next to the image root
        log.set_logfile(
            self.command_args['--target-dir'] + '/build/image-root.log'
        )
    self.load_xml_description(
        self.command_args['--description']
    )
    if self.command_args['--set-repo']:
        # --set-repo replaces the first configured repository
        (repo_source, repo_type, repo_alias, repo_prio) = \
            self.quadruple_token(self.command_args['--set-repo'])
        self.xml_state.set_repository(
            repo_source, repo_type, repo_alias, repo_prio
        )
    if self.command_args['--add-repo']:
        # --add-repo may be given multiple times
        for add_repo in self.command_args['--add-repo']:
            (repo_source, repo_type, repo_alias, repo_prio) = \
                self.quadruple_token(add_repo)
            self.xml_state.add_repository(
                repo_source, repo_type, repo_alias, repo_prio
            )
    Path.create(self.command_args['--target-dir'])
    if os.path.exists('/.buildenv'):
        # This build runs inside of a buildservice worker. Therefore
        # the repo definitions are adapted accordingly
        self.xml_state.translate_obs_to_suse_repositories()
    elif self.command_args['--obs-repo-internal']:
        # This build should use the internal SUSE buildservice
        # Be aware that the buildhost has to provide access
        self.xml_state.translate_obs_to_ibs_repositories()
    log.info('Preparing new root system')
    system = System(
        self.xml_state, image_root, True
    )
    manager = system.setup_repositories()
    system.install_bootstrap(manager)
    system.install_system(
        manager
    )
    profile = Profile(self.xml_state)
    defaults = Defaults()
    defaults.to_profile(profile)
    setup = SystemSetup(
        self.xml_state,
        self.command_args['--description'],
        image_root
    )
    setup.import_shell_environment(profile)
    setup.import_description()
    setup.import_overlay_files()
    setup.call_config_script()
    setup.import_image_identifier()
    setup.setup_groups()
    setup.setup_users()
    setup.setup_keyboard_map()
    setup.setup_locale()
    setup.setup_timezone()
    system.pinch_system(
        manager
    )
    # make sure system instance is cleaned up now
    del system
    setup.call_image_script()
    # make sure setup instance is cleaned up now
    del setup
    log.info('Creating system image')
    image_builder = ImageBuilder(
        self.xml_state,
        self.command_args['--target-dir'],
        image_root
    )
    result = image_builder.create()
    result.print_results()
    result.dump(
        self.command_args['--target-dir'] + '/kiwi.result'
    )
def boot_partition_size(self):
    """
    Size of the boot partition in mbytes.

    Returns the configured size if set, the default boot size
    otherwise, and None when no boot partition is needed.
    """
    if not self.need_boot_partition():
        return None
    if self.bootpart_mbytes:
        return self.bootpart_mbytes
    return Defaults.get_default_boot_mbytes()
def __import_system_description_elements(self):
    """
    Propagate the relevant parts of the system XML description into
    the boot image description: names, repositories, drivers, strip
    sections, selected preferences subsections, boot package lists
    and the build type attributes plus systemdisk, machine and
    oemconfig sections.
    """
    self.xml_state.copy_displayname(self.boot_xml_state)
    self.xml_state.copy_name(self.boot_xml_state)
    self.xml_state.copy_repository_sections(
        target_state=self.boot_xml_state, wipe=True
    )
    self.xml_state.copy_drivers_sections(self.boot_xml_state)
    # strip information comes from the shipped boot image strip file
    strip_description = XMLDescription(
        Defaults.get_boot_image_strip_file()
    )
    strip_xml_state = XMLState(strip_description.load())
    strip_xml_state.copy_strip_sections(self.boot_xml_state)
    preferences_subsections = [
        'bootloader_theme',
        'bootsplash_theme',
        'locale',
        'packagemanager',
        'rpm_check_signatures',
        'showlicense'
    ]
    self.xml_state.copy_preferences_subsections(
        preferences_subsections, self.boot_xml_state
    )
    self.xml_state.copy_bootincluded_packages(self.boot_xml_state)
    self.xml_state.copy_bootincluded_archives(self.boot_xml_state)
    self.xml_state.copy_bootdelete_packages(self.boot_xml_state)
    build_type_attributes = [
        'bootkernel',
        'bootloader',
        'bootprofile',
        'boottimeout',
        'btrfs_root_is_snapshot',
        'devicepersistency',
        'filesystem',
        'firmware',
        'fsmountoptions',
        'hybrid',
        'hybridpersistent',
        'hybridpersistent_filesystem',
        'installboot',
        'installprovidefailsafe',
        'kernelcmdline',
        'ramonly',
        'vga',
        'wwid_wait_timeout'
    ]
    self.xml_state.copy_build_type_attributes(
        build_type_attributes, self.boot_xml_state
    )
    self.xml_state.copy_systemdisk_section(self.boot_xml_state)
    self.xml_state.copy_machine_section(self.boot_xml_state)
    self.xml_state.copy_oemconfig_section(self.boot_xml_state)
def process(self):
    """
    Prepare a new root system at --root from the loaded XML
    description: set up repositories, install bootstrap and system
    packages and run the configured system setup steps.

    Reads --description, --root, --allow-existing-root, --set-repo,
    --add-repo and --obs-repo-internal from the parsed command
    arguments.
    """
    self.manual = Help()
    if self.__help():
        return
    Privileges.check_for_root_permissions()
    self.load_xml_description(
        self.command_args['--description']
    )
    if self.command_args['--set-repo']:
        # --set-repo replaces the first configured repository
        (repo_source, repo_type, repo_alias, repo_prio) = \
            self.quadruple_token(self.command_args['--set-repo'])
        self.xml_state.set_repository(
            repo_source, repo_type, repo_alias, repo_prio
        )
    if self.command_args['--add-repo']:
        # --add-repo may be given multiple times
        for add_repo in self.command_args['--add-repo']:
            (repo_source, repo_type, repo_alias, repo_prio) = \
                self.quadruple_token(add_repo)
            self.xml_state.add_repository(
                repo_source, repo_type, repo_alias, repo_prio
            )
    if os.path.exists('/.buildenv'):
        # This build runs inside of a buildservice worker. Therefore
        # the repo definitions are adapted accordingly
        self.xml_state.translate_obs_to_suse_repositories()
    elif self.command_args['--obs-repo-internal']:
        # This build should use the internal SUSE buildservice
        # Be aware that the buildhost has to provide access
        self.xml_state.translate_obs_to_ibs_repositories()
    log.info('Preparing system')
    system = System(
        self.xml_state,
        self.command_args['--root'],
        self.command_args['--allow-existing-root']
    )
    manager = system.setup_repositories()
    system.install_bootstrap(manager)
    system.install_system(
        manager
    )
    profile = Profile(self.xml_state)
    defaults = Defaults()
    defaults.to_profile(profile)
    setup = SystemSetup(
        self.xml_state,
        self.command_args['--description'],
        self.command_args['--root']
    )
    setup.import_shell_environment(profile)
    setup.import_description()
    setup.import_overlay_files()
    setup.call_config_script()
    setup.import_image_identifier()
    setup.setup_groups()
    setup.setup_users()
    setup.setup_keyboard_map()
    setup.setup_locale()
    setup.setup_timezone()
    system.pinch_system(
        manager
    )
def convert(query):
    """Perform conversion and send results to Alfred.

    query -- the raw user query string, e.g. '2.5cm in'.
    Returns 0 after sending Alfred feedback (either the formatted
    conversion results or a single error item).
    """
    error = None
    results = None
    defs = Defaults(wf)
    c = Converter(defs)
    try:
        i = c.parse(query)
    except ValueError as err:
        log.critical(u'invalid query (%s): %s', query, err)
        # NOTE(review): err.message is Python-2-only — verify runtime
        error = err.message
    else:
        try:
            results = c.convert(i)
            log.debug('results=%r', results)
        except NoToUnits:
            # no destination units given and no saved default for
            # this dimensionality
            log.critical(u'No to_units (or defaults) for %s',
                         i.dimensionality)
            error = u'No destination units (or defaults) for {}'.format(
                i.dimensionality)
        except DimensionalityError as err:
            # units exist but are not convertible into each other
            log.critical(u'invalid conversion (%s): %s', query, err)
            error = u"Can't convert from {} {} to {} {}".format(
                err.units1, err.dim1, err.units2, err.dim2)
    if not error and not results:
        error = 'Conversion input not understood'
    if error:  # Show error
        wf.add_item(error,
                    'For example: 2.5cm in | 178lb kg | 200m/s mph',
                    valid=False,
                    icon=ICON_WARNING)
    else:  # Show results
        f = Formatter(DECIMAL_PLACES, DECIMAL_SEPARATOR,
                      THOUSANDS_SEPARATOR)
        wf.setvar('query', query)
        for conv in results:
            # displayed value always carries the unit; the copy text
            # drops it unless COPY_UNIT is set
            value = copytext = f.formatted(conv.to_number, conv.to_unit)
            if not COPY_UNIT:
                copytext = f.formatted(conv.to_number)
            it = wf.add_item(value,
                             valid=True,
                             arg=copytext,
                             copytext=copytext,
                             largetext=value,
                             icon='icon.png')
            # cmd modifier toggles the saved default unit for this
            # dimensionality
            action = 'save'
            name = 'Save'
            if defs.is_default(conv.dimensionality, conv.to_unit):
                action = 'delete'
                name = 'Remove'
            mod = it.add_modifier(
                'cmd',
                u'{} {} as default unit for {}'.format(
                    name, conv.to_unit, conv.dimensionality))
            mod.setvar('action', action)
            mod.setvar('unit', conv.to_unit)
            mod.setvar('dimensionality', conv.dimensionality)
    wf.send_feedback()
    log.debug('finished')
    return 0
def create_recovery_archive(self): """ create a compressed recovery archive from the root tree for use with kiwi's recvoery system. The method creates additional data into the image root filesystem which is deleted prior to the creation of a new recovery data set """ # cleanup bash_comand = [ 'rm', '-f', self.root_dir + '/recovery.*' ] Command.run(['bash', '-c', ' '.join(bash_comand)]) if not self.oemconfig['recovery']: return # recovery.tar log.info('Creating recovery tar archive') metadata = { 'archive_name': self.root_dir + '/recovery.tar', 'archive_filecount': self.root_dir + '/recovery.tar.files', 'archive_size': self.root_dir + '/recovery.tar.size', 'partition_size': self.root_dir + '/recovery.partition.size', 'partition_filesystem': self.root_dir + '/recovery.tar.filesystem' } recovery_archive = NamedTemporaryFile( delete=False ) archive = ArchiveTar( filename=recovery_archive.name, create_from_file_list=False ) archive.create( source_dir=self.root_dir, exclude=['dev', 'proc', 'sys'], options=[ '--numeric-owner', '--hard-dereference', '--preserve-permissions' ] ) Command.run( ['mv', recovery_archive.name, metadata['archive_name']] ) # recovery.tar.filesystem recovery_filesystem = self.xml_state.build_type.get_filesystem() with open(metadata['partition_filesystem'], 'w') as partfs: partfs.write('%s' % recovery_filesystem) log.info( '--> Recovery partition filesystem: %s', recovery_filesystem ) # recovery.tar.files bash_comand = [ 'tar', '-tf', metadata['archive_name'], '|', 'wc', '-l' ] tar_files_call = Command.run( ['bash', '-c', ' '.join(bash_comand)] ) tar_files_count = int(tar_files_call.output.rstrip('\n')) with open(metadata['archive_filecount'], 'w') as files: files.write('%d\n' % tar_files_count) log.info( '--> Recovery file count: %d files', tar_files_count ) # recovery.tar.size recovery_archive_size_bytes = os.path.getsize(metadata['archive_name']) with open(metadata['archive_size'], 'w') as size: size.write('%d' % recovery_archive_size_bytes) 
log.info( '--> Recovery uncompressed size: %d mbytes', int(recovery_archive_size_bytes / 1048576) ) # recovery.tar.gz log.info('--> Compressing recovery archive') compress = Compress(self.root_dir + '/recovery.tar') compress.gzip() # recovery.partition.size recovery_archive_gz_size_mbytes = int( os.path.getsize(metadata['archive_name'] + '.gz') / 1048576 ) recovery_partition_mbytes = recovery_archive_gz_size_mbytes \ + Defaults.get_recovery_spare_mbytes() with open(metadata['partition_size'], 'w') as gzsize: gzsize.write('%d' % recovery_partition_mbytes) log.info( '--> Recovery partition size: %d mbytes', recovery_partition_mbytes ) # delete recovery archive if inplace recovery is requested # In this mode the recovery archive is created at install time # and not at image creation time. However the recovery metadata # is preserved in order to be able to check if enough space # is available on the disk to create the recovery archive. if self.oemconfig['recovery_inplace']: log.info( '--> Inplace recovery requested, deleting archive' ) Path.wipe(metadata['archive_name'] + '.gz')