def make_partition(dev, begin, end, ptype):
    """Create a primary or logical partition on the device.

    :param dev: A device file, e.g. /dev/sda.
    :param begin: Beginning of the partition (MiB).
    :param end: Ending of the partition (MiB).
    :param ptype: Partition type: 'primary' or 'logical'.
    :returns: None
    """
    if ptype not in ('primary', 'logical'):
        raise errors.WrongPartitionSchemeError('Wrong partition type: %s'
                                               % ptype)

    # Boundaries must describe a non-empty range.
    if begin >= end:
        raise errors.WrongPartitionSchemeError(
            'Wrong boundaries: begin >= end')

    # The requested range must fit entirely inside one of the
    # free-space chunks reported by parted.
    free_chunks = (p for p in info(dev)['parts'] if p['fstype'] == 'free')
    if not any(p['begin'] <= begin and end <= p['end']
               for p in free_chunks):
        raise errors.WrongPartitionSchemeError(
            'Invalid boundaries: begin and end '
            'are not inside available free space')

    utils.execute('parted', '-a', 'optimal', '-s', dev, 'unit', 'MiB',
                  'mkpart', ptype, str(begin), str(end),
                  check_exit_code=[0])
def boot_device(self, grub_version=2):
    """Return the device the given GRUB version can boot from.

    We assume /boot is a separate partition. If it is not
    then we try to use root file system.

    :param grub_version: Major GRUB version; 1 means legacy GRUB.
    :returns: Device file name of the boot file system.
    :raises: errors.WrongPartitionSchemeError if no suitable device
        can be found for the requested GRUB version.
    """
    boot_fs = self.fs_by_mount('/boot') or self.fs_by_mount('/')
    if not boot_fs:
        # FIX: the original message said "not fount" — corrected typo.
        raise errors.WrongPartitionSchemeError(
            'Error while trying to find boot device: '
            'boot file system not found, '
            'it must be a separate mount point')
    if grub_version == 1:
        # Legacy GRUB has a limitation. It is not able to mount MD devices.
        # If it is MD compatible it is only able to ignore MD metadata
        # and to mount one of those devices which are parts of MD device,
        # but it is possible only if MD device is a MIRROR.
        md = self.md_by_name(boot_fs.device)
        if md:
            try:
                return md.devices[0]
            except IndexError:
                raise errors.WrongPartitionSchemeError(
                    'Error while trying to find boot device: '
                    'md device %s does not have devices attached' %
                    md.name)
        # Legacy GRUB is not able to mount LVM devices.
        if self.lv_by_device_name(boot_fs.device):
            raise errors.WrongPartitionSchemeError(
                'Error while trying to find boot device: '
                'found device is %s but legacy grub is not able to '
                'mount logical volumes' % boot_fs.device)
    return boot_fs.device
def set_partition_flag(dev, num, flag, state='on'):
    """Sets flag on a partition

    :param dev: A device file, e.g. /dev/sda.
    :param num: Partition number
    :param flag: Flag name. Must be one of 'bios_grub', 'legacy_boot',
    'boot', 'raid', 'lvm'
    :param state: Desired flag state. 'on' or 'off'. Default is 'on'.

    :returns: None
    """
    # parted supports more flags but we are interested in
    # setting only this subset of them.
    # not all of these flags are compatible with one another.
    supported_flags = ('bios_grub', 'legacy_boot', 'boot', 'raid', 'lvm')
    if flag not in supported_flags:
        raise errors.WrongPartitionSchemeError(
            'Unsupported partition flag: %s' % flag)
    if state not in ('on', 'off'):
        raise errors.WrongPartitionSchemeError(
            'Wrong partition flag state: %s' % state)
    utils.execute('parted', '-s', dev,
                  'set', str(num), flag, state,
                  check_exit_code=[0])
def make_partition(dev, begin, end, ptype):
    """Create a primary or logical partition on the device.

    :param dev: A device file, e.g. /dev/sda.
    :param begin: Beginning of the partition (MiB).
    :param end: Ending of the partition (MiB).
    :param ptype: Partition type: 'primary' or 'logical'.
    :returns: None
    """
    LOG.debug('Trying to create a partition: dev=%s begin=%s end=%s' %
              (dev, begin, end))
    if ptype not in ('primary', 'logical'):
        raise errors.WrongPartitionSchemeError('Wrong partition type: %s'
                                               % ptype)

    # Boundaries must describe a non-empty range.
    if begin >= end:
        raise errors.WrongPartitionSchemeError(
            'Wrong boundaries: begin >= end')

    # The whole [begin, end] range must lie within a single free chunk.
    free_chunks = [p for p in info(dev)['parts'] if p['fstype'] == 'free']
    if not any(p['begin'] <= begin and end <= p['end']
               for p in free_chunks):
        raise errors.WrongPartitionSchemeError(
            'Invalid boundaries: begin and end '
            'are not inside available free space')

    # Let udev finish processing queued events before touching the disk.
    utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
    out, err = utils.execute('parted', '-a', 'optimal', '-s', dev,
                             'unit', 'MiB', 'mkpart', ptype,
                             str(begin), str(end), check_exit_code=[0, 1])
    LOG.debug('Parted output: \n%s' % out)
    reread_partitions(dev, out=out)
def make_partition(dev, begin, end, ptype, alignment='optimal'):
    """Creates a partition on the device.

    :param dev: A device file, e.g. /dev/sda.
    :param begin: Beginning of the partition.
    :param end: Ending of the partition.
    :param ptype: Partition type: primary or logical.
    :param alignment: Set alignment mode for newly created partitions,
        valid alignment types are: none, cylinder, minimal, optimal. For
        more information about this you can find in GNU parted manual.

    :returns: None
    """
    LOG.debug('Trying to create a partition: dev=%s begin=%s end=%s' %
              (dev, begin, end))
    if ptype not in ('primary', 'logical'):
        raise errors.WrongPartitionSchemeError('Wrong partition type: %s'
                                               % ptype)
    if alignment not in PARTITION_ALIGMENT:
        raise errors.WrongPartitionSchemeError(
            'Wrong partition alignment requested: %s' % alignment)

    # Boundaries must describe a non-empty range.
    if begin >= end:
        raise errors.WrongPartitionSchemeError(
            'Wrong boundaries: begin >= end')

    # The whole [begin, end] range must lie within a single free chunk.
    free_chunks = [p for p in info(dev)['parts'] if p['fstype'] == 'free']
    if not any(p['begin'] <= begin and end <= p['end']
               for p in free_chunks):
        raise errors.WrongPartitionSchemeError(
            'Invalid boundaries: begin and end '
            'are not inside available free space')

    # Wait for udev to settle before modifying the partition table.
    utils.udevadm_settle()
    out, err = utils.execute('parted', '-a', alignment, '-s', dev,
                             'unit', 'MiB', 'mkpart', ptype,
                             str(begin), str(end), check_exit_code=[0, 1])
    LOG.debug('Parted output: \n%s' % out)
    reread_partitions(dev, out=out)
def do_configdrive(self):
    """Build the configdrive ISO and register it as a deployable image."""
    cc_output_path = os.path.join(CONF.tmp_path, 'cloud_config.txt')
    bh_output_path = os.path.join(CONF.tmp_path, 'boothook.txt')
    # NOTE:file should be strictly named as 'user-data'
    # the same is for meta-data as well
    ud_output_path = os.path.join(CONF.tmp_path, 'user-data')
    md_output_path = os.path.join(CONF.tmp_path, 'meta-data')
    tmpl_dir = CONF.nc_template_path

    # Render every cloud-init template into its temporary file.
    render_jobs = (
        ('cloud_config', cc_output_path),
        ('boothook', bh_output_path),
        ('meta-data', md_output_path),
    )
    for template_kind, output_path in render_jobs:
        utils.render_and_save(
            tmpl_dir,
            self.configdrive_scheme.template_names(template_kind),
            self.configdrive_scheme.template_data(),
            output_path)

    # Combine boothook and cloud-config into a single user-data file,
    # then master the config-drive ISO out of user-data and meta-data.
    utils.execute(
        'write-mime-multipart', '--output=%s' % ud_output_path,
        '%s:text/cloud-boothook' % bh_output_path,
        '%s:text/cloud-config' % cc_output_path)
    utils.execute(
        'genisoimage', '-output', CONF.config_drive_path,
        '-volid', 'cidata', '-joliet', '-rock',
        ud_output_path, md_output_path)

    configdrive_device = self.partition_scheme.configdrive_device()
    if configdrive_device is None:
        raise errors.WrongPartitionSchemeError(
            'Error while trying to get configdrive device: '
            'configdrive device not found')
    self.image_scheme.add_image(
        uri='file://%s' % CONF.config_drive_path,
        target_device=configdrive_device,
        image_format='iso9660',
        container='raw')
def root_device(self):
    """Return the device carrying the root file system.

    :raises: errors.WrongPartitionSchemeError if no file system is
        mounted on '/'.
    """
    root_fs = self.fs_by_mount('/')
    if root_fs:
        return root_fs.device
    raise errors.WrongPartitionSchemeError(
        'Error while trying to find root device: '
        'root file system not found')
def root_device(self):
    """Return the device carrying the root file system.

    :raises: errors.WrongPartitionSchemeError if no file system is
        mounted on '/'.
    """
    try:
        # First file system mounted on '/' wins, same as a linear scan.
        return next(fs.device for fs in self.fss if fs.mount == '/')
    except StopIteration:
        raise errors.WrongPartitionSchemeError(
            'Error while trying to find root device: '
            'root file system not found')
def validate(scheme):
    """Validates a given partition scheme using jsonschema.

    :param scheme: partition scheme to validate
    :raises: errors.WrongPartitionSchemeError if the scheme does not
        match the schema, contains no disks, or requests a root file
        system of 16T or more.
    """
    try:
        checker = jsonschema.FormatChecker()
        jsonschema.validate(scheme, KS_SPACES_SCHEMA,
                            format_checker=checker)
    except Exception as exc:
        raise errors.WrongPartitionSchemeError(str(exc))

    # scheme is not valid if the number of disks is 0
    if not [d for d in scheme if d['type'] == 'disk']:
        raise errors.WrongPartitionSchemeError('Partition scheme seems empty')

    for space in scheme:
        for volume in space.get('volumes', []):
            # FIX: use volume.get('mount') — a volume is not guaranteed to
            # carry a 'mount' key, and volume['mount'] raised KeyError for
            # large mountless volumes. Sizes are MiB: 16777216 MiB == 16 TiB.
            if volume['size'] > 16777216 and volume.get('mount') == '/':
                raise errors.WrongPartitionSchemeError(
                    'Root file system must be less than 16T')
def _add_configdrive_image(self):
    """Register the previously built configdrive ISO in the image scheme.

    :raises: errors.WrongPartitionSchemeError if the partition scheme
        has no configdrive device.
    """
    configdrive_device = self.driver.partition_scheme.configdrive_device()
    if configdrive_device is None:
        raise errors.WrongPartitionSchemeError(
            'Error while trying to get configdrive device: '
            'configdrive device not found')
    iso_size = os.path.getsize(CONF.config_drive_path)
    iso_md5 = utils.calculate_md5(CONF.config_drive_path, iso_size)
    self.driver.image_scheme.add_image(
        uri='file://%s' % CONF.config_drive_path,
        target_device=configdrive_device,
        format='iso9660',
        container='raw',
        size=iso_size,
        md5=iso_md5,
    )
def boot_disks(self):
    """Property to get suitable list of disks to place '/boot'

    :returns: list of disk where boot partition can be placed
    """
    # FIXME(agordeev): NVMe drives should be skipped as
    # accessing such drives during the boot typically
    # requires using UEFI which is still not supported
    # by fuel-agent (it always installs BIOS variant of
    # grub)
    # * grub bug (http://savannah.gnu.org/bugs/?41883)
    disks = self.ks_disks
    suitable_disks = [
        disk for disk in disks
        if ('nvme' not in disk['name'] and self._is_boot_disk(disk))
    ]
    # NOTE(agordeev) sometimes, there's no separate /boot fs image.
    #                Therefore bootloader should be installed into
    #                the disk where rootfs image lands. Ironic's case.
    if not suitable_disks and not self._have_boot_partition(disks):
        return [d for d in disks
                if self._is_root_disk(d) and 'nvme' not in d['name']]

    # FIXME(agordeev): if we have rootfs on fake raid, then /boot should
    # land on it too. We can't proceed with grub-install otherwise.
    md_boot_disks = [
        disk for disk in self.md_os_disks if disk in suitable_disks]
    if md_boot_disks:
        disks = md_boot_disks
    else:
        disks = suitable_disks

    bootable_disk = [disk for disk in disks if disk.get('bootable')]
    if bootable_disk:
        if len(bootable_disk) >= 2:
            # FIX: the original placeholder '%{0}' left a stray '%' in
            # the rendered message; '{0}' is the correct str.format form.
            raise errors.WrongPartitionSchemeError(
                "More than one bootable disk found! {0}".
                format(bootable_disk))
        return bootable_disk
    return disks
def parse_partition_scheme(self):
    """Build a PartitionScheme object from the provision data.

    Walks every disk/volume in the kickstart data, creates GPT tables,
    boot/UEFI/CEPH-journal/regular partitions, physical volumes, RAID
    members and logical volumes, and returns the resulting scheme.

    :returns: objects.PartitionScheme
    :raises: errors.WrongPartitionSchemeError if /boot or the
        configdrive partition could not be created when required.
    """
    LOG.debug('--- Preparing partition scheme ---')
    data = self.partition_data()
    ks_spaces_validator.validate(data)
    partition_scheme = objects.PartitionScheme()

    ceph_osds = self._num_ceph_osds()
    journals_left = ceph_osds
    ceph_journals = self._num_ceph_journals()

    LOG.debug('Looping over all disks in provision data')
    for disk in self.ks_disks:
        # skipping disk if there are no volumes with size >0
        # to be allocated on it which are not boot partitions
        if all((
            v["size"] <= 0 for v in disk["volumes"]
            if v["type"] != "boot" and v.get("mount") != "/boot"
        )):
            continue
        LOG.debug('Processing disk %s' % disk['name'])
        LOG.debug('Adding gpt table on disk %s' % disk['name'])
        parted = partition_scheme.add_parted(
            name=self._disk_dev(disk), label='gpt')
        if disk in self.boot_disks:
            # we install bootloader only on every suitable disk
            LOG.debug('Adding bootloader stage0 on disk %s' % disk['name'])
            parted.install_bootloader = True
            # legacy boot partition
            LOG.debug('Adding bios_grub partition on disk %s: size=24' %
                      disk['name'])
            parted.add_partition(size=24, flags=['bios_grub'])
            # uefi partition (for future use)
            LOG.debug('Adding UEFI partition on disk %s: size=200' %
                      disk['name'])
            parted.add_partition(size=200)

        LOG.debug('Looping over all volumes on disk %s' % disk['name'])
        for volume in disk['volumes']:
            LOG.debug('Processing volume: '
                      'name=%s type=%s size=%s mount=%s vg=%s' %
                      (volume.get('name'), volume.get('type'),
                       volume.get('size'), volume.get('mount'),
                       volume.get('vg')))
            if volume['size'] <= 0:
                LOG.debug('Volume size is zero. Skipping.')
                continue

            if volume.get('name') == 'cephjournal':
                LOG.debug('Volume seems to be a CEPH journal volume. '
                          'Special procedure is supposed to be applied.')
                # We need to allocate a journal partition for each ceph OSD
                # Determine the number of journal partitions we need on
                # each device
                ratio = int(math.ceil(float(ceph_osds) / ceph_journals))
                # No more than 10GB will be allocated to a single journal
                # partition
                size = volume["size"] / ratio
                if size > 10240:
                    size = 10240
                # This will attempt to evenly spread partitions across
                # multiple devices e.g. 5 osds with 2 journal devices will
                # create 3 partitions on the first device and 2 on the
                # second
                if ratio < journals_left:
                    end = ratio
                else:
                    end = journals_left
                for i in range(0, end):
                    journals_left -= 1
                    if volume['type'] == 'partition':
                        LOG.debug('Adding CEPH journal partition on '
                                  'disk %s: size=%s' %
                                  (disk['name'], size))
                        prt = parted.add_partition(size=size)
                        LOG.debug('Partition name: %s' % prt.name)
                        if 'partition_guid' in volume:
                            LOG.debug('Setting partition GUID: %s' %
                                      volume['partition_guid'])
                            prt.set_guid(volume['partition_guid'])
                continue

            if volume['type'] in ('partition', 'pv', 'raid'):
                if volume.get('mount') != '/boot':
                    LOG.debug('Adding partition on disk %s: size=%s' %
                              (disk['name'], volume['size']))
                    prt = parted.add_partition(
                        size=volume['size'],
                        keep_data=volume.get('keep_data', False))
                    LOG.debug('Partition name: %s' % prt.name)
                elif volume.get('mount') == '/boot' \
                        and not self._boot_partition_done \
                        and disk in self.boot_disks:
                    # only the first /boot volume on a suitable boot disk
                    # actually becomes a partition
                    LOG.debug('Adding /boot partition on disk %s: '
                              'size=%s', disk['name'], volume['size'])
                    prt = parted.add_partition(
                        size=volume['size'],
                        keep_data=volume.get('keep_data', False))
                    LOG.debug('Partition name: %s', prt.name)
                    self._boot_partition_done = True
                else:
                    LOG.debug('No need to create partition on disk %s. '
                              'Skipping.', disk['name'])
                    continue

            if volume['type'] == 'partition':
                if 'partition_guid' in volume:
                    LOG.debug('Setting partition GUID: %s' %
                              volume['partition_guid'])
                    prt.set_guid(volume['partition_guid'])
                if 'mount' in volume and volume['mount'] != 'none':
                    LOG.debug('Adding file system on partition: '
                              'mount=%s type=%s' %
                              (volume['mount'],
                               volume.get('file_system', 'xfs')))
                    partition_scheme.add_fs(
                        device=prt.name, mount=volume['mount'],
                        fs_type=volume.get('file_system', 'xfs'),
                        fs_label=volume.get('disk_label'))
                    if volume['mount'] == '/boot' and not self._boot_done:
                        self._boot_done = True

            if volume['type'] == 'pv':
                LOG.debug('Creating pv on partition: pv=%s vg=%s' %
                          (prt.name, volume['vg']))
                lvm_meta_size = volume.get('lvm_meta_size', 64)
                # The reason for that is to make sure that
                # there will be enough space for creating logical volumes.
                # Default lvm extension size is 4M. Nailgun volume
                # manager does not care of it and if physical volume size
                # is 4M * N + 3M and lvm metadata size is 4M * L then only
                # 4M * (N-L) + 3M of space will be available for
                # creating logical extensions. So only 4M * (N-L) of space
                # will be available for logical volumes, while nailgun
                # volume manager might reguire 4M * (N-L) + 3M
                # logical volume. Besides, parted aligns partitions
                # according to its own algorithm and actual partition might
                # be a bit smaller than integer number of mebibytes.
                if lvm_meta_size < 10:
                    raise errors.WrongPartitionSchemeError(
                        'Error while creating physical volume: '
                        'lvm metadata size is too small')
                metadatasize = int(math.floor((lvm_meta_size - 8) / 2))
                metadatacopies = 2
                partition_scheme.vg_attach_by_name(
                    pvname=prt.name, vgname=volume['vg'],
                    metadatasize=metadatasize,
                    metadatacopies=metadatacopies)

            if volume['type'] == 'raid':
                if 'mount' in volume and \
                        volume['mount'] not in ('none', '/boot'):
                    LOG.debug('Attaching partition to RAID '
                              'by its mount point %s' % volume['mount'])
                    metadata = 'default'
                    if self.have_grub1_by_default:
                        metadata = '0.90'
                    LOG.debug('Going to use MD metadata version {0}. '
                              'The version was guessed at the data has '
                              'been given about the operating system.'
                              .format(metadata))
                    partition_scheme.md_attach_by_mount(
                        device=prt.name, mount=volume['mount'],
                        fs_type=volume.get('file_system', 'xfs'),
                        fs_label=volume.get('disk_label'),
                        metadata=metadata)
                if 'mount' in volume and volume['mount'] == '/boot' and \
                        not self._boot_done:
                    LOG.debug('Adding file system on partition: '
                              'mount=%s type=%s' %
                              (volume['mount'],
                               volume.get('file_system', 'ext2')))
                    partition_scheme.add_fs(
                        device=prt.name, mount=volume['mount'],
                        fs_type=volume.get('file_system', 'ext2'),
                        fs_label=volume.get('disk_label'))
                    self._boot_done = True

        # this partition will be used to put there configdrive image
        if (partition_scheme.configdrive_device() is None
                and self._needs_configdrive()
                and (self._is_root_disk(disk) or self._is_os_disk(disk))):
            LOG.debug('Adding configdrive partition on disk %s: size=20' %
                      disk['name'])
            parted.add_partition(size=20, configdrive=True)

    # checking if /boot is expected to be created
    if self._have_boot_partition(self.ks_disks) and \
            (not self._boot_partition_done or not self._boot_done):
        raise errors.WrongPartitionSchemeError(
            '/boot partition has not been created for some reasons')

    # checking if configdrive partition is created
    if (not partition_scheme.configdrive_device()
            and self._needs_configdrive()):
        raise errors.WrongPartitionSchemeError(
            'configdrive partition has not been created for some reasons')

    LOG.debug('Looping over all volume groups in provision data')
    for vg in self.ks_vgs:
        LOG.debug('Processing vg %s' % vg['id'])
        LOG.debug('Looping over all logical volumes in vg %s' % vg['id'])
        for volume in vg['volumes']:
            LOG.debug('Processing lv %s' % volume['name'])
            if volume['size'] <= 0:
                LOG.debug('LogicalVolume size is zero. Skipping.')
                continue
            if volume['type'] == 'lv':
                LOG.debug('Adding lv to vg %s: name=%s, size=%s' %
                          (vg['id'], volume['name'], volume['size']))
                lv = partition_scheme.add_lv(name=volume['name'],
                                             vgname=vg['id'],
                                             size=volume['size'])
                if 'mount' in volume and volume['mount'] != 'none':
                    LOG.debug('Adding file system on lv: '
                              'mount=%s type=%s' %
                              (volume['mount'],
                               volume.get('file_system', 'xfs')))
                    partition_scheme.add_fs(
                        device=lv.device_name, mount=volume['mount'],
                        fs_type=volume.get('file_system', 'xfs'),
                        fs_label=volume.get('disk_label'))

    partition_scheme.elevate_keep_data()
    return partition_scheme
def partition_scheme(self):
    """Build a PartitionScheme object from the provision data.

    Older variant: installs the bootloader on every disk, handles CEPH
    journal volumes, regular partitions, PVs, RAID members and logical
    volumes, then appends kernel parameters from ks_meta.

    :returns: objects.PartitionScheme
    """
    LOG.debug('--- Preparing partition scheme ---')
    data = self.partition_data()
    ks_spaces_validator.validate(data)
    partition_scheme = objects.PartitionScheme()

    ceph_osds = self._num_ceph_osds()
    journals_left = ceph_osds
    ceph_journals = self._num_ceph_journals()

    LOG.debug('Looping over all disks in provision data')
    for disk in self.ks_disks:
        LOG.debug('Processing disk %s' % disk['name'])
        LOG.debug('Adding gpt table on disk %s' % disk['name'])
        parted = partition_scheme.add_parted(name=self._disk_dev(disk),
                                             label='gpt')
        # we install bootloader on every disk
        LOG.debug('Adding bootloader stage0 on disk %s' % disk['name'])
        parted.install_bootloader = True
        # legacy boot partition
        LOG.debug('Adding bios_grub partition on disk %s: size=24' %
                  disk['name'])
        parted.add_partition(size=24, flags=['bios_grub'])
        # uefi partition (for future use)
        LOG.debug('Adding UEFI partition on disk %s: size=200' %
                  disk['name'])
        parted.add_partition(size=200)

        LOG.debug('Looping over all volumes on disk %s' % disk['name'])
        for volume in disk['volumes']:
            LOG.debug('Processing volume: '
                      'name=%s type=%s size=%s mount=%s vg=%s' %
                      (volume.get('name'), volume.get('type'),
                       volume.get('size'), volume.get('mount'),
                       volume.get('vg')))
            if volume['size'] <= 0:
                LOG.debug('Volume size is zero. Skipping.')
                continue

            if volume.get('name') == 'cephjournal':
                LOG.debug('Volume seems to be a CEPH journal volume. '
                          'Special procedure is supposed to be applied.')
                # We need to allocate a journal partition for each ceph OSD
                # Determine the number of journal partitions we need on
                # each device
                ratio = math.ceil(float(ceph_osds) / ceph_journals)
                # No more than 10GB will be allocated to a single journal
                # partition
                size = volume["size"] / ratio
                if size > 10240:
                    size = 10240
                # This will attempt to evenly spread partitions across
                # multiple devices e.g. 5 osds with 2 journal devices will
                # create 3 partitions on the first device and 2 on the
                # second
                if ratio < journals_left:
                    end = ratio
                else:
                    end = journals_left
                for i in range(0, end):
                    journals_left -= 1
                    if volume['type'] == 'partition':
                        LOG.debug('Adding CEPH journal partition on '
                                  'disk %s: size=%s' %
                                  (disk['name'], size))
                        prt = parted.add_partition(size=size)
                        LOG.debug('Partition name: %s' % prt.name)
                        if 'partition_guid' in volume:
                            LOG.debug('Setting partition GUID: %s' %
                                      volume['partition_guid'])
                            prt.set_guid(volume['partition_guid'])
                continue

            if volume['type'] in ('partition', 'pv', 'raid'):
                LOG.debug('Adding partition on disk %s: size=%s' %
                          (disk['name'], volume['size']))
                prt = parted.add_partition(size=volume['size'])
                LOG.debug('Partition name: %s' % prt.name)

            if volume['type'] == 'partition':
                if 'partition_guid' in volume:
                    LOG.debug('Setting partition GUID: %s' %
                              volume['partition_guid'])
                    prt.set_guid(volume['partition_guid'])
                if 'mount' in volume and volume['mount'] != 'none':
                    LOG.debug('Adding file system on partition: '
                              'mount=%s type=%s' %
                              (volume['mount'],
                               volume.get('file_system', 'xfs')))
                    partition_scheme.add_fs(
                        device=prt.name, mount=volume['mount'],
                        fs_type=volume.get('file_system', 'xfs'),
                        fs_label=self._getlabel(volume.get('disk_label')))

            if volume['type'] == 'pv':
                LOG.debug('Creating pv on partition: pv=%s vg=%s' %
                          (prt.name, volume['vg']))
                lvm_meta_size = volume.get('lvm_meta_size', 64)
                # The reason for that is to make sure that
                # there will be enough space for creating logical volumes.
                # Default lvm extension size is 4M. Nailgun volume
                # manager does not care of it and if physical volume size
                # is 4M * N + 3M and lvm metadata size is 4M * L then only
                # 4M * (N-L) + 3M of space will be available for
                # creating logical extensions. So only 4M * (N-L) of space
                # will be available for logical volumes, while nailgun
                # volume manager might reguire 4M * (N-L) + 3M
                # logical volume. Besides, parted aligns partitions
                # according to its own algorithm and actual partition might
                # be a bit smaller than integer number of mebibytes.
                if lvm_meta_size < 10:
                    raise errors.WrongPartitionSchemeError(
                        'Error while creating physical volume: '
                        'lvm metadata size is too small')
                metadatasize = int(math.floor((lvm_meta_size - 8) / 2))
                metadatacopies = 2
                partition_scheme.vg_attach_by_name(
                    pvname=prt.name, vgname=volume['vg'],
                    metadatasize=metadatasize,
                    metadatacopies=metadatacopies)

            if volume['type'] == 'raid':
                if 'mount' in volume and volume['mount'] != 'none':
                    LOG.debug('Attaching partition to RAID '
                              'by its mount point %s' % volume['mount'])
                    partition_scheme.md_attach_by_mount(
                        device=prt.name, mount=volume['mount'],
                        fs_type=volume.get('file_system', 'xfs'),
                        fs_label=self._getlabel(volume.get('disk_label')))

        # this partition will be used to put there configdrive image
        if partition_scheme.configdrive_device() is None:
            LOG.debug('Adding configdrive partition on disk %s: size=20' %
                      disk['name'])
            parted.add_partition(size=20, configdrive=True)

    LOG.debug('Looping over all volume groups in provision data')
    for vg in self.ks_vgs:
        LOG.debug('Processing vg %s' % vg['id'])
        LOG.debug('Looping over all logical volumes in vg %s' % vg['id'])
        for volume in vg['volumes']:
            LOG.debug('Processing lv %s' % volume['name'])
            if volume['size'] <= 0:
                LOG.debug('Lv size is zero. Skipping.')
                continue
            if volume['type'] == 'lv':
                LOG.debug('Adding lv to vg %s: name=%s, size=%s' %
                          (vg['id'], volume['name'], volume['size']))
                lv = partition_scheme.add_lv(name=volume['name'],
                                             vgname=vg['id'],
                                             size=volume['size'])
                if 'mount' in volume and volume['mount'] != 'none':
                    LOG.debug('Adding file system on lv: '
                              'mount=%s type=%s' %
                              (volume['mount'],
                               volume.get('file_system', 'xfs')))
                    partition_scheme.add_fs(
                        device=lv.device_name, mount=volume['mount'],
                        fs_type=volume.get('file_system', 'xfs'),
                        fs_label=self._getlabel(volume.get('disk_label')))

    LOG.debug('Appending kernel parameters: %s' %
              self.data['ks_meta']['pm_data']['kernel_params'])
    partition_scheme.append_kernel_params(
        self.data['ks_meta']['pm_data']['kernel_params'])
    return partition_scheme
def do_bootloader(self):
    """Install and configure the bootloader inside the target chroot.

    Mounts the target, collects per-mount-point UUIDs, adjusts the LVM
    filter on multipath nodes, installs GRUB 1 or 2, injects udev net
    rules, locks nailgun-agent, and writes /etc/fstab.
    """
    LOG.debug('--- Installing bootloader (do_bootloader) ---')
    chroot = '/tmp/target'
    self.mount_target(chroot)

    # Map every mount point to the UUID of its backing device.
    mount2uuid = {}
    for fs in self.driver.partition_scheme.fss:
        mount2uuid[fs.mount] = utils.execute(
            'blkid', '-c', '/dev/null', '-o', 'value', '-s', 'UUID',
            fs.device, check_exit_code=[0])[0].strip()

    if '/' not in mount2uuid:
        raise errors.WrongPartitionSchemeError(
            'Error: device with / mountpoint has not been found')

    # NOTE(sslypushenko) Due to possible races between LVM and multipath,
    # we need to adjust LVM devices filter.
    # This code is required only for Ubuntu 14.04, because in trusty,
    # LVM filters, does not recognize partions on multipath devices
    # out of the box. It is fixed in latest LVM versions
    multipath_devs = [parted.name
                      for parted in self.driver.partition_scheme.parteds
                      if hw.is_multipath_device(parted.name)]
    # If there are no multipath devices on the node, we should not do
    # anything to prevent regression.
    if multipath_devs:
        # We need to explicitly whitelist each non-mutlipath device
        lvm_filter = []
        for parted in self.driver.partition_scheme.parteds:
            device = parted.name
            if device in multipath_devs:
                continue
            # We use devlinks from /dev/disk/by-id instead of /dev/sd*,
            # because the first one are persistent.
            devlinks_by_id = [
                link for link in
                hw.udevreport(device).get('DEVLINKS', [])
                if link.startswith('/dev/disk/by-id/')]
            for link in devlinks_by_id:
                lvm_filter.append(
                    'a|^{}(p)?(-part)?[0-9]*|'.format(link))
        # Multipath devices should be whitelisted. All other devlinks
        # should be blacklisted, to prevent LVM from grubbing underlying
        # multipath devices.
        lvm_filter.extend(CONF.lvm_filter_for_mpath)
        # Setting devices/preferred_names also helps LVM to find devices by
        # the proper devlinks
        bu.override_lvm_config(
            chroot,
            {'devices': {
                'scan': CONF.mpath_lvm_scan_dirs,
                'global_filter': lvm_filter,
                'preferred_names': CONF.mpath_lvm_preferred_names}},
            lvm_conf_path=CONF.lvm_conf_path,
            update_initramfs=True)

    grub = self.driver.grub

    guessed_version = gu.guess_grub_version(chroot=chroot)
    if guessed_version != grub.version:
        grub.version = guessed_version
        LOG.warning('Grub version differs from which the operating system '
                    'should have by default. Found version in image: '
                    '{0}'.format(guessed_version))
    boot_device = self.driver.partition_scheme.boot_device(grub.version)
    install_devices = [d.name
                       for d in self.driver.partition_scheme.parteds
                       if d.install_bootloader]

    grub.append_kernel_params('root=UUID=%s ' % mount2uuid['/'])

    kernel = grub.kernel_name or gu.guess_kernel(
        chroot=chroot, regexp=grub.kernel_regexp)
    initrd = grub.initrd_name or gu.guess_initrd(
        chroot=chroot, regexp=grub.initrd_regexp)

    if grub.version == 1:
        gu.grub1_cfg(kernel=kernel, initrd=initrd,
                     kernel_params=grub.kernel_params, chroot=chroot,
                     grub_timeout=CONF.grub_timeout)
        gu.grub1_install(install_devices, boot_device, chroot=chroot)
    else:
        # TODO(kozhukalov): implement which kernel to use by default
        # Currently only grub1_cfg accepts kernel and initrd parameters.
        gu.grub2_cfg(kernel_params=grub.kernel_params, chroot=chroot,
                     grub_timeout=CONF.grub_timeout)
        gu.grub2_install(install_devices, chroot=chroot)

    if CONF.fix_udev_net_rules:
        # FIXME(agordeev) There's no convenient way to perfrom NIC
        # remapping in Ubuntu, so injecting files prior the first boot
        # should work
        with open(chroot + '/etc/udev/rules.d/70-persistent-net.rules',
                  'wt', encoding='utf-8') as f:
            f.write(u'# Generated by fuel-agent during provisioning: '
                    u'BEGIN\n')
            # pattern is aa:bb:cc:dd:ee:ff_eth0,aa:bb:cc:dd:ee:ff_eth1
            for mapping in self.driver.configdrive_scheme.\
                    common.udevrules.split(','):
                mac_addr, nic_name = mapping.split('_')
                f.write(u'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
                        u'ATTR{address}=="%s", ATTR{type}=="1", '
                        u'KERNEL=="eth*", NAME="%s"\n' % (mac_addr,
                                                          nic_name))
            f.write(
                u'# Generated by fuel-agent during provisioning: END\n')
        # FIXME(agordeev): Disable net-generator that will add new etries
        # to 70-persistent-net.rules
        with open(chroot + '/etc/udev/rules.d/'
                  '75-persistent-net-generator.rules', 'wt',
                  encoding='utf-8') as f:
            f.write(u'# Generated by fuel-agent during provisioning:\n'
                    u'# DO NOT DELETE. It is needed to disable '
                    u'net-generator\n')

    # FIXME(kozhukalov): Prevent nailgun-agent from doing anything.
    # This ugly hack is to be used together with the command removing
    # this lock file not earlier than /etc/rc.local
    # The reason for this hack to appear is to prevent nailgun-agent from
    # changing mcollective config at the same time when cloud-init
    # does the same. Otherwise, we can end up with corrupted mcollective
    # config. For details see https://bugs.launchpad.net/fuel/+bug/1449186
    LOG.debug('Preventing nailgun-agent from doing '
              'anything until it is unlocked')
    utils.makedirs_if_not_exists(os.path.join(chroot, 'etc/nailgun-agent'))
    with open(os.path.join(chroot, 'etc/nailgun-agent/nodiscover'), 'w'):
        pass

    # FIXME(kozhukalov): When we have just os-root fs image and don't have
    # os-var-log fs image while / and /var/log are supposed to be
    # separate file systems and os-var-log is mounted into
    # non-empty directory on the / file system, those files in /var/log
    # directory become unavailable.
    # The thing is that among those file there is /var/log/upstart
    # where upstart daemon writes its logs. We have specific upstart job
    # which is to flush open files once all file systems are mounted.
    # This job needs upstart directory to be available on os-var-log
    # file system.
    # This is just a temporary fix and correct fix will be available soon
    # via updates.
    utils.execute('mkdir', '-p', chroot + '/var/log/upstart')

    with open(chroot + '/etc/fstab', 'wt', encoding='utf-8') as f:
        for fs in self.driver.partition_scheme.fss:
            # TODO(kozhukalov): Think of improving the logic so as to
            # insert a meaningful fsck order value which is last zero
            # at fstab line. Currently we set it into 0 which means
            # a corresponding file system will never be checked. We assume
            # puppet or other configuration tool will care of it.
            if fs.mount == '/':
                f.write(u'UUID=%s %s %s defaults,errors=panic 0 0\n' %
                        (mount2uuid[fs.mount], fs.mount, fs.type))
            else:
                f.write(u'UUID=%s %s %s defaults 0 0\n' %
                        (mount2uuid[fs.mount], fs.mount, fs.type))

    self.umount_target(chroot)
def do_bootloader(self):
    """Install and configure the bootloader inside the target chroot.

    Mounts the target, collects per-mount-point UUIDs, installs GRUB 1
    or 2, injects udev net rules, locks nailgun-agent, and writes
    /etc/fstab before unmounting.
    """
    LOG.debug('--- Installing bootloader (do_bootloader) ---')
    chroot = '/tmp/target'
    self.mount_target(chroot)

    # Map every mount point to the UUID of its backing device.
    mount2uuid = {}
    for fs in self.driver.partition_scheme.fss:
        mount2uuid[fs.mount] = utils.execute(
            'blkid', '-o', 'value', '-s', 'UUID', fs.device,
            check_exit_code=[0])[0].strip()

    if '/' not in mount2uuid:
        raise errors.WrongPartitionSchemeError(
            'Error: device with / mountpoint has not been found')

    grub = self.driver.grub

    guessed_version = gu.guess_grub_version(chroot=chroot)
    if guessed_version != grub.version:
        grub.version = guessed_version
        LOG.warning('Grub version differs from which the operating system '
                    'should have by default. Found version in image: '
                    '{0}'.format(guessed_version))
    boot_device = self.driver.partition_scheme.boot_device(grub.version)
    install_devices = [
        d.name for d in self.driver.partition_scheme.parteds
        if d.install_bootloader
    ]

    grub.append_kernel_params('root=UUID=%s ' % mount2uuid['/'])

    kernel = grub.kernel_name or \
        gu.guess_kernel(chroot=chroot, regexp=grub.kernel_regexp)
    initrd = grub.initrd_name or \
        gu.guess_initrd(chroot=chroot, regexp=grub.initrd_regexp)

    if grub.version == 1:
        gu.grub1_cfg(kernel=kernel, initrd=initrd,
                     kernel_params=grub.kernel_params, chroot=chroot,
                     grub_timeout=CONF.grub_timeout)
        gu.grub1_install(install_devices, boot_device, chroot=chroot)
    else:
        # TODO(kozhukalov): implement which kernel to use by default
        # Currently only grub1_cfg accepts kernel and initrd parameters.
        gu.grub2_cfg(kernel_params=grub.kernel_params, chroot=chroot,
                     grub_timeout=CONF.grub_timeout)
        gu.grub2_install(install_devices, chroot=chroot)

    if CONF.fix_udev_net_rules:
        # FIXME(agordeev) There's no convenient way to perfrom NIC
        # remapping in Ubuntu, so injecting files prior the first boot
        # should work
        with open(chroot + '/etc/udev/rules.d/70-persistent-net.rules',
                  'wt', encoding='utf-8') as f:
            f.write(u'# Generated by fuel-agent during provisioning: '
                    u'BEGIN\n')
            # pattern is aa:bb:cc:dd:ee:ff_eth0,aa:bb:cc:dd:ee:ff_eth1
            for mapping in self.driver.configdrive_scheme.\
                    common.udevrules.split(','):
                mac_addr, nic_name = mapping.split('_')
                f.write(u'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
                        u'ATTR{address}=="%s", ATTR{type}=="1", '
                        u'KERNEL=="eth*", NAME="%s"\n' % (mac_addr,
                                                          nic_name))
            f.write(
                u'# Generated by fuel-agent during provisioning: END\n')
        # FIXME(agordeev): Disable net-generator that will add new etries
        # to 70-persistent-net.rules
        with open(chroot +
                  '/etc/udev/rules.d/75-persistent-net-generator.rules',
                  'wt', encoding='utf-8') as f:
            f.write(u'# Generated by fuel-agent during provisioning:\n'
                    u'# DO NOT DELETE. It is needed to disable '
                    u'net-generator\n')

    # FIXME(kozhukalov): Prevent nailgun-agent from doing anything.
    # This ugly hack is to be used together with the command removing
    # this lock file not earlier than /etc/rc.local
    # The reason for this hack to appear is to prevent nailgun-agent from
    # changing mcollective config at the same time when cloud-init
    # does the same. Otherwise, we can end up with corrupted mcollective
    # config. For details see https://bugs.launchpad.net/fuel/+bug/1449186
    LOG.debug('Preventing nailgun-agent from doing '
              'anything until it is unlocked')
    utils.makedirs_if_not_exists(os.path.join(chroot, 'etc/nailgun-agent'))
    with open(os.path.join(chroot, 'etc/nailgun-agent/nodiscover'), 'w'):
        pass

    # FIXME(kozhukalov): When we have just os-root fs image and don't have
    # os-var-log fs image while / and /var/log are supposed to be
    # separate file systems and os-var-log is mounted into
    # non-empty directory on the / file system, those files in /var/log
    # directory become unavailable.
    # The thing is that among those file there is /var/log/upstart
    # where upstart daemon writes its logs. We have specific upstart job
    # which is to flush open files once all file systems are mounted.
    # This job needs upstart directory to be available on os-var-log
    # file system.
    # This is just a temporary fix and correct fix will be available soon
    # via updates.
    utils.execute('mkdir', '-p', chroot + '/var/log/upstart')

    with open(chroot + '/etc/fstab', 'wt', encoding='utf-8') as f:
        for fs in self.driver.partition_scheme.fss:
            # TODO(kozhukalov): Think of improving the logic so as to
            # insert a meaningful fsck order value which is last zero
            # at fstab line. Currently we set it into 0 which means
            # a corresponding file system will never be checked. We assume
            # puppet or other configuration tool will care of it.
            if fs.mount == '/':
                f.write(u'UUID=%s %s %s defaults,errors=panic 0 0\n' %
                        (mount2uuid[fs.mount], fs.mount, fs.type))
            else:
                f.write(u'UUID=%s %s %s defaults 0 0\n' %
                        (mount2uuid[fs.mount], fs.mount, fs.type))

    self.umount_target(chroot)