def purge_disk_cache(dispatcher, path):
    geom.scan()
    delete = False
    disk = get_disk_by_path(path)

    if not disk:
        return

    if disk['is_multipath']:
        # Looks like one path was removed
        logger.info('Path %s to disk <%s> (%s) was removed', path, disk['id'], disk['description'])
        disk['multipath.members'].remove(path)

        # Was this the last path?
        if len(disk['multipath.members']) == 0:
            logger.info('Disk %s <%s> (%s) was removed (last path is gone)', path, disk['id'], disk['description'])
            diskinfo_cache.remove(disk['id'])
            delete = True
        else:
            diskinfo_cache.put(disk['id'], disk)
    else:
        logger.info('Disk %s <%s> (%s) was removed', path, disk['id'], disk['description'])
        diskinfo_cache.remove(disk['id'])
        delete = True

    if delete:
        # Mark disk for auto-delete
        ds_disk = dispatcher.datastore.get_by_id('disks', disk['id'])
        ds_disk['delete_at'] = datetime.now() + EXPIRE_TIMEOUT
        dispatcher.datastore.update('disks', ds_disk['id'], ds_disk)
def destroy_upload_location(self):
    geom.scan()

    klass_label = geom.class_by_name('LABEL')
    prov = klass_label.xml.find(
        f'.//provider[name = "label/{UPLOAD_LABEL}"]/../consumer/provider')
    if prov is None:
        return
    klass_md = geom.class_by_name('MD')
    prov = klass_md.xml.find(f'.//provider[@id = "{prov.attrib["ref"]}"]/name')
    if prov is None:
        return

    mddev = prov.text

    subprocess.run(
        ['umount', f'/dev/label/{UPLOAD_LABEL}'],
        capture_output=True,
        check=False,
    )
    cp = subprocess.run(
        ['mdconfig', '-d', '-u', mddev],
        text=True,
        capture_output=True,
        check=False,
    )
    if cp.returncode != 0:
        raise CallError(f'Could not destroy memory device: {cp.stderr}')
def list_partitions(self, disk):
    geom.scan()
    klass = geom.class_by_name('PART')
    parts = []
    for g in klass.xml.findall(f'./geom[name=\'{disk}\']'):
        for p in g.findall('./provider'):
            size = p.find('./mediasize')
            if size is not None:
                try:
                    size = int(size.text)
                except ValueError:
                    size = None
            name = p.find('./name')
            part_type = p.find('./config/type')
            if part_type is not None:
                part_type = self.middleware.call_sync(
                    'disk.get_partition_uuid_from_name', part_type.text)
            if not part_type:
                part_type = 'UNKNOWN'
            part = {
                'name': name.text,
                'size': size,
                'partition_type': part_type,
                'disk': disk,
                'id': p.get('id'),
                'path': os.path.join('/dev', name.text),
                'encrypted_provider': None,
            }
            if os.path.exists(f'{part["path"]}.eli'):
                part['encrypted_provider'] = f'{part["path"]}.eli'
            parts.append(part)
    return parts
def get_disks(self, name):
    try:
        with libzfs.ZFS() as zfs:
            disks = list(zfs.get(name).disks)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)

    geom.scan()
    labelclass = geom.class_by_name('LABEL')
    for absdev in disks:
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        find = labelclass.xml.findall(f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        if name and geom.geom_by_name('DISK', name):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
def gptid_from_part_type(self, disk, part_type):
    geom.scan()
    g = geom.class_by_name('PART')
    uuid = g.xml.find(
        f'.//geom[name="{disk}"]//config/[type="{part_type}"]/rawuuid')
    if uuid is None:
        raise ValueError(f'Partition type {part_type} not found on {disk}')
    return f'gptid/{uuid.text}'
def label_to_disk(self, label, geom_scan=True):
    if geom_scan:
        geom.scan()
    dev = self.label_to_dev(label, geom_scan=False) or label
    part = geom.class_by_name('PART').xml.find(f'.//provider[name="{dev}"]/../name')
    if part is not None:
        return part.text
def get_swap_size(self, disk):
    geom.scan()
    labelclass = geom.class_by_name('PART')
    length = labelclass.xml.find(
        f".//geom[name='{disk}']/provider/config[type='freebsd-swap']/length"
    )
    if length is None:
        return None
    return int(length.text)
def get_encrypted(self, options):
    """
    Get all geli providers

    It might be an entire disk or a partition of type freebsd-zfs.

    Before a geli-encrypted pool can be imported, the disks used in the pool
    must be decrypted; only then can the pool import proceed. For that case,
    `unused` can be passed as `true` to find out which disks are geli
    encrypted but not being used by active ZFS pools.
    """
    providers = []

    disks_blacklist = []
    if options['unused']:
        disks_blacklist += self.middleware.call_sync('disk.get_reserved')

    geom.scan()
    klass_part = geom.class_by_name('PART')
    klass_label = geom.class_by_name('LABEL')
    if not klass_part:
        return providers

    for g in klass_part.geoms:
        for p in g.providers:
            if p.config is None:
                continue

            if p.config['type'] != 'freebsd-zfs':
                continue

            disk = p.geom.consumer.provider.name
            if disk in disks_blacklist:
                continue

            try:
                subprocess.run(
                    ['geli', 'dump', p.name],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                    check=True,
                )
            except subprocess.CalledProcessError:
                continue

            dev = None
            if klass_label:
                for g in klass_label.geoms:
                    if g.name == p.name:
                        dev = g.provider.name
                        break

            if dev is None:
                dev = p.name

            providers.append({
                'name': p.name,
                'dev': dev,
                'disk': disk
            })

    return providers
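# Illustrative usage sketch (an assumption, not part of the service above): if this
# method is exposed to the rest of the middleware as 'disk.get_encrypted', a caller
# following the decrypt-before-import workflow described in the docstring could look
# up the still-unused geli providers roughly like this.
#
# unused_geli = self.middleware.call_sync('disk.get_encrypted', {'unused': True})
# for provider in unused_geli:
#     self.logger.debug('geli provider %s on disk %s is not in use',
#                       provider['dev'], provider['disk'])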
def label_to_dev(self, label, geom_scan=True):
    if label.endswith('.nop'):
        label = label[:-4]
    elif label.endswith('.eli'):
        label = label[:-4]

    if geom_scan:
        geom.scan()
    klass = geom.class_by_name('LABEL')
    prov = klass.xml.find(f'.//provider[name="{label}"]/../name')
    if prov is not None:
        return prov.text
def get_encrypted(self, options):
    """
    Get all geli providers

    It might be an entire disk or a partition of type freebsd-zfs
    """
    providers = []

    disks_blacklist = []
    if options['unused']:
        disks_blacklist += self.middleware.call_sync('disk.get_reserved')

    geom.scan()
    klass_part = geom.class_by_name('PART')
    klass_label = geom.class_by_name('LABEL')
    if not klass_part:
        return providers

    for g in klass_part.geoms:
        for p in g.providers:
            if p.config['type'] != 'freebsd-zfs':
                continue

            disk = p.geom.consumer.provider.name
            if disk in disks_blacklist:
                continue

            try:
                subprocess.run(
                    ['geli', 'dump', p.name],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                    check=True,
                )
            except subprocess.CalledProcessError:
                continue

            dev = None
            if klass_label:
                for g in klass_label.geoms:
                    if g.name == p.name:
                        dev = g.provider.name
                        break

            if dev is None:
                dev = p.name

            providers.append({
                'name': p.name,
                'dev': dev,
                'disk': disk
            })

    return providers
def _get_class_disk(self):
    result = []
    geom.scan()
    for child in geom.class_by_name('DISK').geoms:
        result.append({
            "path": os.path.join("/dev", child.name),
            "name": child.name,
            "mediasize": child.provider.mediasize,
            "description": child.provider.config['descr']
        })
    return result
def clean_multipaths(dispatcher):
    global multipaths

    geom.scan()
    cls = geom.class_by_name('MULTIPATH')
    if cls:
        for i in cls.geoms:
            logger.info('Destroying multipath device %s', i.name)
            dispatcher.exec_and_wait_for_event(
                'system.device.detached',
                lambda args: args['path'] == '/dev/multipath/{0}'.format(i.name),
                lambda: system('/sbin/gmultipath', 'destroy', i.name)
            )

    multipaths = -1
def generate_disk_cache(dispatcher, path):
    diskinfo_cache_lock.acquire()
    geom.scan()
    name = os.path.basename(path)
    gdisk = geom.geom_by_name('DISK', name)
    multipath_info = None

    disk_info = info_from_device(gdisk.name)
    serial = disk_info['serial']
    identifier = device_to_identifier(name, serial)
    ds_disk = dispatcher.datastore.get_by_id('disks', identifier)

    # Path represents a disk device (not a multipath device) and has an NAA ID attached
    lunid = gdisk.provider.config.get('lunid')
    if lunid:
        # Check if device could be part of multipath configuration
        d = get_disk_by_lunid(lunid)
        if (d and d['path'] != path) or (ds_disk and ds_disk['is_multipath']):
            multipath_info = attach_to_multipath(dispatcher, d, ds_disk, path)

    provider = gdisk.provider
    try:
        camdev = CamDevice(gdisk.name)
    except RuntimeError:
        camdev = None

    disk = wrap({
        'path': path,
        'is_multipath': False,
        'description': provider.config['descr'],
        'serial': serial,
        'lunid': provider.config.get('lunid'),
        'model': disk_info['model'],
        'interface': disk_info['interface'],
        'is_ssd': disk_info['is_ssd'],
        'id': identifier,
        'controller': camdev.__getstate__() if camdev else None,
    })

    if multipath_info:
        disk.update(multipath_info)

    diskinfo_cache.put(identifier, disk)
    update_disk_cache(dispatcher, path)
    dispatcher.call_sync('disks.configure_disk', identifier)

    logger.info('Added <%s> (%s) to disk cache', identifier, disk['description'])
    diskinfo_cache_lock.release()
def _get_class_multipath(self):
    result = []
    geom.scan()
    cls = geom.class_by_name('MULTIPATH')
    if not cls:
        return []
    for child in cls.geoms:
        result.append({
            "path": os.path.join("/dev", child.name),
            "name": child.name,
            "mediasize": child.provider.mediasize,
            "members": [c.provider.name for c in child.consumers]
        })
    return result
def create_upload_location(self):
    geom.scan()
    klass_label = geom.class_by_name('LABEL')
    prov = klass_label.xml.find(
        f'.//provider[name = "label/{UPLOAD_LABEL}"]/../consumer/provider')
    if prov is None:
        cp = subprocess.run(
            ['mdconfig', '-a', '-t', 'swap', '-s', '2800m'],
            text=True,
            capture_output=True,
            check=False,
        )
        if cp.returncode != 0:
            raise CallError(f'Could not create memory device: {cp.stderr}')
        mddev = cp.stdout.strip()

        subprocess.run(['glabel', 'create', UPLOAD_LABEL, mddev],
                       capture_output=True, check=False)

        cp = subprocess.run(
            ['newfs', f'/dev/label/{UPLOAD_LABEL}'],
            text=True,
            capture_output=True,
            check=False,
        )
        if cp.returncode != 0:
            raise CallError(f'Could not create temporary filesystem: {cp.stderr}')

        shutil.rmtree(UPLOAD_LOCATION, ignore_errors=True)
        os.makedirs(UPLOAD_LOCATION)

        cp = subprocess.run(
            ['mount', f'/dev/label/{UPLOAD_LABEL}', UPLOAD_LOCATION],
            text=True,
            capture_output=True,
            check=False,
        )
        if cp.returncode != 0:
            raise CallError(f'Could not mount temporary filesystem: {cp.stderr}')

    shutil.chown(UPLOAD_LOCATION, 'www', 'www')
    os.chmod(UPLOAD_LOCATION, 0o755)

    return UPLOAD_LOCATION
def generate_disk_cache(dispatcher, path):
    diskinfo_cache_lock.acquire()
    geom.scan()
    name = os.path.basename(path)
    gdisk = geom.geom_by_name('DISK', name)
    multipath_info = None

    disk_info = info_from_device(gdisk.name)
    serial = disk_info['serial']
    identifier = device_to_identifier(name, serial)
    ds_disk = dispatcher.datastore.get_by_id('disks', identifier)

    # Path represents a disk device (not a multipath device) and has an NAA ID attached
    lunid = gdisk.provider.config.get('lunid')
    if lunid:
        # Check if device could be part of multipath configuration
        d = get_disk_by_lunid(lunid)
        if (d and d['path'] != path) or (ds_disk and ds_disk['is_multipath']):
            multipath_info = attach_to_multipath(dispatcher, d, ds_disk, path)

    provider = gdisk.provider
    camdev = CamDevice(gdisk.name)

    disk = wrap({
        'path': path,
        'is_multipath': False,
        'description': provider.config['descr'],
        'serial': serial,
        'lunid': provider.config.get('lunid'),
        'model': disk_info['model'],
        'interface': disk_info['interface'],
        'is_ssd': disk_info['is_ssd'],
        'id': identifier,
        'controller': camdev.__getstate__(),
    })

    if multipath_info:
        disk.update(multipath_info)

    diskinfo_cache.put(identifier, disk)
    update_disk_cache(dispatcher, path)
    dispatcher.call_sync('disks.configure_disk', identifier)

    logger.info('Added <%s> (%s) to disk cache', identifier, disk['description'])
    diskinfo_cache_lock.release()
def get_disks(self, name):
    disks = self.get_devices(name)
    geom.scan()
    labelclass = geom.class_by_name('LABEL')
    for dev in disks:
        dev = dev.replace('.eli', '')
        find = labelclass.xml.findall(f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        if name and (name.startswith('multipath/') or geom.geom_by_name('DISK', name)):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
def get_swap_mirrors(self, filters, options):
    mirrors = []
    geom.scan()
    klass = geom.class_by_name('MIRROR')
    if not klass:
        return mirrors
    for g in filter(lambda g: not g.name.endswith('.sync'), klass.geoms):
        mirror_data = {
            **deepcopy(self.mirror_base),
            'name': g.name,
            'config_type': g.config.get('Type') if g.config else None,
            'path': os.path.join('/dev/mirror', g.name),
            'real_path': os.path.join('/dev/mirror', g.name),
        }
        if os.path.exists(f'{mirror_data["path"]}.eli'):
            mirror_data['encrypted_provider'] = f'{mirror_data["path"]}.eli'
        for c in g.consumers:
            mirror_data['providers'].append({
                'name': c.provider.name,
                'id': c.provider.id,
                'disk': c.provider.geom.name
            })
        mirrors.append(mirror_data)

    return filter_list(mirrors, filters, options)
def update_disk_cache(dispatcher, path):
    geom.scan()
    name = os.path.basename(path)
    gdisk = geom.geom_by_name('DISK', name)
    gpart = geom.geom_by_name('PART', name)
    gmultipath = geom.geom_by_name('MULTIPATH', path.split('/')[-1])
    disk = get_disk_by_path(path)
    if not disk:
        return

    old_id = disk['id']

    if gmultipath:
        # Path represents multipath device (not disk device)
        # MEDIACHANGE event -> use first member for hardware queries
        cons = gmultipath.consumers.next()
        gdisk = cons.provider.geom

    if not gdisk:
        return

    disk_info = info_from_device(gdisk.name)
    serial = disk_info['serial']

    provider = gdisk.provider
    partitions = list(generate_partitions_list(gpart))
    identifier = device_to_identifier(name, serial)
    data_part = first_or_default(lambda x: x['type'] == 'freebsd-zfs', partitions)
    data_uuid = data_part["uuid"] if data_part else None
    swap_part = first_or_default(lambda x: x['type'] == 'freebsd-swap', partitions)
    swap_uuid = swap_part["uuid"] if swap_part else None

    disk.update({
        'mediasize': provider.mediasize,
        'sectorsize': provider.sectorsize,
        'max_rotation': disk_info['max_rotation'],
        'smart_capable': disk_info['smart_capable'],
        'smart_enabled': disk_info['smart_enabled'],
        'smart_status': disk_info['smart_status'],
        'id': identifier,
        'schema': gpart.config.get('scheme') if gpart else None,
        'partitions': partitions,
        'data_partition_uuid': data_uuid,
        'data_partition_path': os.path.join("/dev/gptid", data_uuid) if data_uuid else None,
        'swap_partition_uuid': swap_uuid,
        'swap_partition_path': os.path.join("/dev/gptid", swap_uuid) if swap_uuid else None,
    })

    if gmultipath:
        disk['multipath.status'] = gmultipath.config['State']
        disk['multipath.mode'] = gmultipath.config['Mode']
        disk['multipath.uuid'] = gmultipath.config['UUID']

    # Purge old cache entry if identifier has changed
    if old_id != identifier:
        logger.debug('Removing disk cache entry for <%s> because identifier changed', old_id)
        diskinfo_cache.remove(old_id)
        diskinfo_cache.put(identifier, disk)
        dispatcher.datastore.delete('disks', old_id)

    persist_disk(dispatcher, disk)
def sync_zpool(self, pool):
    """
    Sync enclosure of a given ZFS pool
    """
    # As we are only interfacing with SES we can skip mapping enclosures or working with non-SES enclosures
    encs = self.__get_enclosures()
    if len(list(encs)) == 0:
        self.logger.debug("Enclosure not found, skipping enclosure sync")
        return None

    if pool is None:
        pools = [pool["name"] for pool in self.middleware.call_sync("pool.query")]
    else:
        pools = [pool]

    geom.scan()
    seen_devs = []
    label2disk = {}
    for pool in pools:
        try:
            pool = self.middleware.call_sync("zfs.pool.query", [["name", "=", pool]], {"get": True})
        except IndexError:
            continue

        label2disk.update({
            label: self.middleware.call_sync("disk.label_to_disk", label)
            for label in self.middleware.call_sync("zfs.pool.get_devices", pool["id"])
        })

        for dev in self.middleware.call_sync("zfs.pool.find_not_online", pool["id"]):
            if dev["path"] is None:
                continue
            label = dev["path"].replace("/dev/", "")
            seen_devs.append(label)
            disk = label2disk.get(label)
            try:
                element = self._get_ses_slot_for_disk(disk)
            except MatchNotFound:
                pass
            else:
                element.device_slot_set("fault")

        # We want spares to only identify slot for Z-series
        # See #32706
        if self.middleware.call_sync("truenas.get_chassis_hardware").startswith("TRUENAS-Z"):
            spare_value = "identify"
        else:
            spare_value = "clear"

        for node in pool["groups"]["spare"]:
            for vdev in node["children"]:
                for dev in vdev["children"]:
                    if dev["path"] is None:
                        continue
                    label = dev["path"].replace("/dev/", "")
                    disk = label2disk.get(label)
                    if disk is None:
                        continue
                    if dev["status"] != "AVAIL":
                        continue
                    seen_devs.append(dev["path"])
                    element = encs.find_device_slot(disk)
                    if element:
                        self.logger.debug(f"{spare_value}ing bay slot for %r", disk)
                        element.device_slot_set(spare_value)

    """
    Go through all devs in the pool
    Make sure the enclosure status is clear for everything else
    """
    for label, disk in label2disk.items():
        if label in seen_devs:
            continue
        seen_devs.append(label)
        try:
            element = self._get_ses_slot_for_disk(disk)
        except MatchNotFound:
            pass
        else:
            element.device_slot_set("clear")

    disks = []
    for label in seen_devs:
        disk = label2disk[label]
        if disk.startswith("multipath/"):
            try:
                disks.append(self.middleware.call_sync(
                    "disk.query",
                    [["devname", "=", disk]],
                    {"get": True, "extra": {"include_expired": True}, "order_by": ["expiretime"]},
                )["name"])
            except IndexError:
                pass
        else:
            disks.append(disk)

    """
    Clear all slots without an attached disk
    """
    for enc in encs:
        for element in enc.iter_by_name().get("Array Device Slot", []):
            if not element.devname or element.devname not in disks:
                element.device_slot_set("clear")
def Install(**kwargs):
    """
    This does the grunt-work of actually doing the install.
    The possible arguments are:

    - config             Object containing configuration. This is where the download URL and
                         package directories will be specified.
    - interactive        Whether or not to be interactive. If true (default), then bsd.Dialog
                         will be used for status and error messages.
    - disks              An array of Disk objects to install to. If set, then the disks will
                         be partitioned and erased. If NOT set, then the installer will create
                         a new boot environment on the existing freenas-boot pool.
    - efi                Boolean indicating whether or not to use EFI (default is False).
    - upgrade_from       An unimported ZFSPool object to install to. This must be set when
                         upgrading, and when creating a new BE on an existing pool.
    - upgrade            Boolean indicating whether or not to upgrade. Requires upgrade_from
                         to be valid.
    - data_dir           A string indicating the location of /data. Normally this will just
                         be "/data", but if installing from something other than the ISO, it
                         will be necessary to specify it.
    - password           A string indicating the root password. Ignored for upgrades; may be
                         None (indicating no password, not recommended).
    - partitions         An array of Partition objects (see Utils). Note that the OS
                         partition will always be installed last.
    - post_install       An array of callable objects, which will be called after
                         installation, as func(mount_point=/path, **kwargs). MUST BE AN ARRAY.
    - package_handler    Call-back for the start of each package. Arguments are (index [int],
                         name [string], packages [array of package names to be installed]).
    - progress_handler   Call-back after each file/directory is installed. Arguments are
                         **kwargs, will [currently] be either done=True (indicating the
                         package is installed), or (total=int [number of objects], index=int
                         [current index], name=string [name of object that was just
                         installed]).
    - manifest           A manifest object. Must be set.
    - package_directory  A path where the package files are located. The package files must
                         already be located in this directory.
    - trampoline         A boolean indicating whether the post-install scripts should be run
                         on reboot (True, default) or during the install (False).

    An illustrative invocation sketch follows this function.
""" LogIt("Install({})".format(kwargs)) orig_kwargs = kwargs.copy() config = kwargs.get("config", Configuration.SystemConfiguration()) interactive = kwargs.get("interactive", True) disks = kwargs.get("disks", []) efi = kwargs.get("efi", False) upgrade_pool = kwargs.get("upgrade_from", None) upgrade = kwargs.get("upgrade", False) data_dir = kwargs.get("data_dir", "/data") password = kwargs.get("password", None) extra_partitions = kwargs.get("partitions", []) post_install = kwargs.get("post_install", []) package_notifier = kwargs.get("package_handler", None) progress_notifier = kwargs.get("progress_handler", None) manifest = kwargs.get("manifest", None) trampoline = kwargs.get("trampoline", True) # The default is based on ISO layout package_dir = kwargs.get("package_directory", "/.mount/{}/Packages".format(Project())) if type(post_install) != list: post_install = [post_install] if not manifest: if interactive: try: Dialog.MessageBox(Title(), "No manifest specified for the installation", height=7, width=45).run() except: pass raise InstallationError("No manifest specified for the installation") config.SetPackageDir(package_dir) mount_point = tempfile.mkdtemp() # Quick sanity check if upgrade and upgrade_pool is None: if interactive: Dialog.MessageBox(Title(), "\nNo pool to upgrade from", height=7, width=30).run() raise InstallationError("Upgrade selected but not previous boot pool selected") if disks is None and upgrade_pool is None: if interactive: Dialog.MessageBox(Title(), "\nNo disks or previous pool selected", height=10, width=30).run() raise InstallationError("No disks or previous boot pool selected") if IsTruenas(): # We use a 16g swap partition in TrueNAS. # Note that this is only used if the disks are being formatted. extra_partitions.append(Partition(type="swap", index="3", size=16*1024*1024*1024)) def make_tn_swap(mount_point=None, **kwargs): # This uses the previously-defined variables, not kwargs if disks and mount_point: try: RunCommand("/sbin/gmirror", "label", "-b", "prefer", ["{}p3".format(disk.name) for disk in disks]) with open(os.path.join(mount_point, "data/fstab.swap"), "w") as swaptab: print("/dev/mirror/swap.eli\tnone\tswap\tsw\t0\t0", file=swaptab) except RunCommandException as e: LogIt("Could not create mirrored swap: {}".format(str(e))) post_install.append(make_tn_swap) # First step is to see if we're upgrading. # If so, we want to copy files from the active BE to # a location in /tmp, so we can copy them back later. # This will import, and then export, the freenas-boot pool. if upgrade_pool and upgrade: upgrade_dir = SaveConfiguration(interactive=interactive, pool=upgrade_pool) else: upgrade_dir = None # Second step is to see if we're formatting drives. # If so, we first will destroy the freenas-boot pool; # after that, we will partition the drives. How we partition # depends on the boot method -- efi or bios. We set the # BE name to "default" and create the freenas-boot pool, and # then the grub dataset. # # If we're NOT formatting the drive, we set the pool name # to time.strftime("default-%Y%m%d-%H%M%S") LogIt("disks = {}".format(disks)) if disks: # This means we're formatting # We need to know what size and types to make the partitions. # If we're using EFI, then we need a 100mbyte msdosfs partition; # otherwise a 512k bios-boot. If we have any extra partitions, # we'll take those into account as well. For the big freebsd-zfs # partition, we'll take the minimum of the remaining space, # rounded down to the nearest gbyte. 
        gByte = 1024 * 1024 * 1024
        if efi:
            # 100mbytes for efi partition
            used = 100 * 1024 * 1024
            boot_part = Partition(type="efi", index=1, size=used)
        else:
            # BIOS partition gets 512kbytes
            used = 512 * 1024
            boot_part = Partition(type="bios-boot", index=1, size=used)
        partitions = [boot_part]

        # For now, we always make the freenas-boot partition index 2, and place
        # it at the end of the disk.
        next_index = 3
        for part in (extra_partitions or []):
            # We will ignore the index given here.
            part.index = next_index
            used += part.size
            LogIt("Additional partition {}".format(part))
            partitions.append(part)
            next_index += 1

        # At this point, used is the sum of the partitions, in bytes.
        # This isn't really correct - we should be rounding the size up
        # to the blocksize of the disk. But partitioning behaves strangely
        # sometimes with flash drives. As a result, when we do the actual
        # partitioning, we use the smart-size (e.g., 1G), which rounds down.
        min_size = 0
        for disk in disks:
            # If the remaining space is too small, this installation won't work well.
            size = disk.size
            size = size - used
            if size < gByte:
                if size < 0:
                    fspace = "no free space after the other partitions"
                else:
                    fspace = "free space is {}, minimum is 1Gbyte".format(SmartSize(size))
                name = disk.name
                LogIt("Disk {} is too small {}".format(name, fspace))
                ssize = SmartSize(disk.size)
                if interactive:
                    Dialog.MessageBox(Title(),
                                      "Disk {} is too small ({})".format(name, ssize),
                                      height=10, width=25).run()
                raise InstallationException("Disk {} is too small ({})".format(name, ssize))
            if (size < min_size) or (not min_size):
                min_size = size

        if min_size == 0:
            if interactive:
                Dialog.MessageBox(Title(),
                                  "Unable to find the size of any of the selected disks",
                                  height=15, width=60).run()
            raise InstallationError("Unable to find disk size")

        # Round min_size down to a gbyte
        part_size = int(min_size / gByte) * gByte
        os_part = Partition(type="freebsd-zfs", index=2, size=part_size, os=True)
        LogIt("OS partition {}".format(os_part))
        partitions.append(os_part)

        # We need to destroy any existing freenas-boot pool.
        # To do that, we may first need to import the pool.
        if upgrade_pool is None:
            try:
                old_pools = list(zfs.find_import(name="freenas-boot"))
            except libzfs.ZFSException as e:
                LogIt("Got ZFS error {} while trying to import freenas-boot for destruction".format(str(e)))
                old_pools = []
        else:
            old_pools = [upgrade_pool]
            # We'll be destroying it, so..
            upgrade_pool = None

        for pool in old_pools:
            try:
                dead_pool = zfs.import_pool(pool, "freenas-boot", {})
                if dead_pool is None:
                    dead_pool = zfs.get("freenas-boot")
                zfs.destroy("freenas-boot")
            except libzfs.ZFSException as e:
                LogIt("Trying to destroy a freenas-boot pool got error {}".format(str(e)))

        try:
            freenas_boot = FormatDisks(disks, partitions, interactive)
        except BaseException as e:
            LogIt("FormatDisks got exception {}".format(str(e)))
            raise

        bename = "freenas-boot/ROOT/default"
    else:
        # We need to import the pool (we exported it above if upgrade_pool)
        try:
            if upgrade_pool:
                freenas_boot = zfs.import_pool(upgrade_pool, "freenas-boot", {})
            else:
                freenas_boot = None
                pools = list(zfs.find_import(name="freenas-boot"))
                if len(pools) > 1:
                    raise InstallationError("There are multiple unimported freenas-boot pools")
                if len(pools) == 1:
                    freenas_boot = zfs.import_pool(upgrade_pool, "freenas-boot", {})
            if freenas_boot is None:
                freenas_boot = zfs.get("freenas-boot")
        except libzfs.ZFSException as e:
            LogIt("Got ZFS error {} while trying to import pool".format(str(e)))
            if interactive:
                Dialog.MessageBox("Error importing boot pool",
                                  "The {} Installer was unable to import the boot pool:\n\n\t{}".format(Project(), str(e)),
                                  height=25, width=60).run()
            raise InstallationError("Unable to import boot pool")
        bename = time.strftime("freenas-boot/ROOT/default-%Y%m%d-%H%M%S")

    # Next, we create the dataset, and mount it, and then mount
    # the grub dataset.
    # We also mount a devfs and tmpfs in the new environment.
    LogIt("BE name is {}".format(bename))
    try:
        freenas_boot.create(bename, fsopts={
            "mountpoint": "legacy",
            "sync": "disabled",
        })
    except libzfs.ZFSException as e:
        LogIt("Could not create BE {}: {}".format(bename, str(e)))
        if interactive:
            Dialog.MessageBox(Title(),
                              "An error occurred creating the installation boot environment\n" +
                              "\n\t{}".format(str(e)),
                              height=25, width=60).run()
        raise InstallationError("Could not create BE {}: {}".format(bename, str(e)))

    MountFilesystems(bename, mount_point)
    # After this, any exceptions need to have the filesystems unmounted
    try:
        # If upgrading, copy the stashed files back
        if upgrade_dir:
            RestoreConfiguration(save_path=upgrade_dir,
                                 interactive=interactive,
                                 destination=mount_point)
        else:
            if os.path.exists(data_dir):
                try:
                    copytree(data_dir, "{}/data".format(mount_point),
                             progress_callback=lambda src, dst: LogIt("Copying {} -> {}".format(src, dst)))
                except:
                    pass
            #
            # We should also handle some FN9 stuff
            # In this case, we want the newer database file, for migration purposes
            # XXX -- this is a problem when installing from FreeBSD
            for dbfile in ["freenas-v1.db", "factory-v1.db"]:
                if os.path.exists("/data/{}".format(dbfile)):
                    copytree("/data/{}".format(dbfile), "{}/data/{}".format(mount_point, dbfile))

        # After that, we do the installation.
        # This involves mounting the new BE,
        # and then running the install code on it.
        installer = Installer.Installer(manifest=manifest,
                                        root=mount_point,
                                        config=config)

        if installer.GetPackages() is not True:
            LogIt("Installer.GetPackages() failed")
            raise InstallationError("Unable to load packages")

        # This should only be true for the ISO installer.
        installer.trampoline = trampoline

        start_time = time.time()
        try:
            installer.InstallPackages(progressFunc=progress_notifier,
                                      handler=package_notifier)
        except BaseException as e:
            LogIt("InstallPackages got exception {}".format(str(e)))
            raise InstallationError("Could not install packages")

        # Packages installed!
        if interactive:
            try:
                status = Dialog.MessageBox(Title(),
                                           "Preparing new boot environment",
                                           height=5, width=35, wait=False)
                status.clear()
                status.run()
            except:
                pass

        for f in ["{}/conf/default/etc/fstab".format(mount_point),
                  "{}/conf/base/etc/fstab".format(mount_point)
                  ]:
            try:
                os.remove(f)
            except:
                LogIt("Unable to remove {} -- ignoring".format(f))

        try:
            with open("{}/etc/fstab".format(mount_point), "w") as fstab:
                print("freenas-boot/grub\t/boot/grub\tzfs\trw,noatime\t1\t0", file=fstab)
        except OSError as e:
            LogIt("Unable to create fstab: {}".format(str(e)))
            raise InstallationError("Unable to create filesystem table")
        try:
            os.link("{}/etc/fstab".format(mount_point),
                    "{}/conf/base/etc/fstab".format(mount_point))
        except OSError as e:
            LogIt("Unable to link /etc/fstab to /conf/base/etc/fstab: {}".format(str(e)))

        # Here, I should change module_path in boot/loader.conf, and get rid of the kernel line
        try:
            lines = []
            boot_config = "{}/boot/loader.conf".format(mount_point)
            with open(boot_config, "r") as bootfile:
                for line in bootfile:
                    line = line.rstrip()
                    if line.startswith("module_path="):
                        lines.append('module_path="/boot/kernel;/boot/modules;/usr/local/modules"')
                    elif line.startswith("kernel="):
                        lines.append('kernel="kernel"')
                    else:
                        lines.append(line)
            with open(boot_config, "w") as bootfile:
                for line in lines:
                    print(line, file=bootfile)
        except BaseException as e:
            LogIt("While modifying loader.conf, got exception {}".format(str(e)))
            # Otherwise I'll ignore it, I think

        # This is to support Xen
        try:
            hvm = RunCommand("/usr/local/sbin/dmidecode", "-s", "system-product-name",
                             chroot=mount_point)
            if hvm == "HVM domU":
                with open(os.path.join(mount_point, "boot", "loader.conf.local"), "a") as f:
                    print('hint.hpet.0.clock="0"', file=f)
        except BaseException as e:
            LogIt("Got an exception trying to set XEN boot loader hint: {}".format(str(e)))

        # Now I have to mount a tmpfs on var
        try:
            LogIt("Mounting tmpfs on var")
            bsd.nmount(source="tmpfs",
                       fspath=os.path.join(mount_point, "var"),
                       fstype="tmpfs")
        except BaseException as e:
            LogIt("Got exception while trying to mount {}/var: {}".format(mount_point, str(e)))
            raise InstallationError("Unable to mount temporary space in newly-created BE")

        # Now we need to populate a data structure
        mtree_command = ["/usr/sbin/mtree", "-deUf"]
        if os.path.exists("/usr/sbin/mtree"):
            mtree_command.append("{}/etc/mtree/BSD.var.dist".format(mount_point))
            mtree_command.extend(["-p", "{}/var".format(mount_point)])
            chroot = None
        else:
            mtree_command.extend(["/etc/mtree/BSD.var.dist", "-p", "/var"])
            chroot = mount_point

        try:
            RunCommand(*mtree_command, chroot=chroot)
        except RunCommandException as e:
            LogIt("{} (chroot={}) failed: {}".format(mtree_command, chroot, str(e)))
            raise InstallationError("Unable to prepare new boot environment")

        try:
            # Now we need to install grub
            # We do this even if we didn't format the disks.
            # But if we didn't format the disks, we need to use the same type
            # of boot loader.
            if interactive:
                try:
                    status = Dialog.MessageBox(Title(),
                                               "Installing boot loader",
                                               height=5, width=35, wait=False)
                    status.clear()
                    status.run()
                except:
                    pass

            # We've just repartitioned, so rescan geom
            geom.scan()

            # Set the boot dataset
            freenas_boot.properties["bootfs"].value = bename
            LogIt("Set bootfs to {}".format(bename))

            # This is EXTREMELY ANNOYING.
            # I'd like to use libzfs to set the property here, but
            # I have to do it in the chrooted environment, because otherwise
            # zfs returns an error and doesn't set it.
            # freenas_boot.properties["cachefile"].value = "/boot/zfs/rpool.cache"
            try:
                RunCommand("/sbin/zpool", "set",
                           "cachefile=/boot/zfs/rpool.cache",
                           "freenas-boot",
                           chroot=mount_point)
            except RunCommandException as e:
                LogIt("Got exception {} while trying to set cachefile".format(str(e)))
                raise InstallationException("Could not set cachefile on boot pool")
            LogIt("Set cachefile to /boot/zfs/rpool.cache")

            # We need to set the serial port stuff in the database before running grub,
            # because it'll use that in the configuration file it generates.
            try:
                SaveSerialSettings(mount_point)
            except:
                raise InstallationError("Could not save serial console settings")

            try:
                # All boot pool disks are partitioned using the same type.
                # Or the darkness rises and squit once again rule the earth.
                # (It's happened.)
                use_efi = Utils.BootPartitionType(freenas_boot.disks[0]) == "efi"
                InstallGrub(chroot=mount_point,
                            disks=freenas_boot.disks,
                            bename=bename, efi=use_efi)
            except RunCommandException as e:
                LogIt("Command {} failed: {} (code {})".format(e.command, e.message, e.code))
                raise InstallationError("Boot loader installation failure")
            except BaseException as e:
                LogIt("InstallGrub got exception {}".format(str(e)))
                raise

            if interactive:
                try:
                    status = Dialog.MessageBox(Title(),
                                               "Finalizing installation",
                                               height=5, width=35, wait=False)
                    status.clear()
                    status.run()
                except BaseException as e:
                    LogIt("Finalizing got exception {}".format(str(e)))

            # This is FN9 specific
            with open("{}/data/first-boot".format(mount_point), "wb"):
                pass
            if upgrade:
                for sentinel in ["/data/cd-upgrade", "/data/need-update"]:
                    with open(mount_point + sentinel, "wb") as f:
                        pass
            elif password is not None:
                if interactive:
                    try:
                        status = Dialog.MessageBox(Title(),
                                                   "\nSetting root password",
                                                   height=7, width=35, wait=False)
                        status.clear()
                        status.run()
                    except:
                        pass
                try:
                    RunCommand("/etc/netcli", "reset_root_pw", password,
                               chroot=mount_point)
                except RunCommandException as e:
                    LogIt("Setting root password: {}".format(str(e)))
                    raise InstallationError("Unable to set root password")
        except BaseException as e:
            LogIt("Got exception {} during configuration".format(str(e)))
            if interactive:
                try:
                    Dialog.MessageBox(Title(),
                                      "Error during configuration",
                                      height=7, width=35).run()
                except:
                    pass
            raise

        # Let's turn sync back to default for the dataset
        try:
            ds = zfs.get_dataset(bename)
        except libzfs.ZFSException as e:
            LogIt("Got ZFS error {} while trying to get {} dataset".format(str(e), bename))
            raise InstallationError("Could not find newly-created BE {}".format(bename))
        try:
            ds.properties["sync"].inherit()
        except BaseException as e:
            LogIt("Unable to set sync on {} to inherit: {}".format(bename, str(e)))

        # That's all I'm going to do for now

        # We save the manifest
        manifest.Save(mount_point)

        # Okay! Now if there are any post-install functions, we call them
        for fp in post_install:
            fp(mount_point=mount_point, **kwargs)

        # And we're done!
        end_time = time.time()
    except InstallationError as e:
        # This is the outer try block -- it needs to ensure mountpoints are
        # cleaned up
        LogIt("Outer block got error {}".format(str(e)))
        if interactive:
            try:
                Dialog.MessageBox("{} Installation Error".format(Project()),
                                  e.message,
                                  height=25, width=50).run()
            except:
                pass
        raise
    except BaseException as e:
        LogIt("Outer block got base exception {}".format(str(e)))
        raise
    finally:
        if package_dir is None:
            LogIt("Removing downloaded packages directory {}".format(cache_dir))
            shutil.rmtree(cache_dir, ignore_errors=True)
        UnmountFilesystems(mount_point)

        LogIt("Exporting freenas-boot at end of installation")
        try:
            zfs.export_pool(freenas_boot)
        except libzfs.ZFSException as e:
            LogIt("Could not export freenas boot: {}".format(str(e)))
            raise

    if interactive:
        total_time = int(end_time - start_time)
        Dialog.MessageBox(Title(),
                          "The {} installer has finished the installation in {} seconds".format(Project(), total_time),
                          height=8, width=40).run()
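# Illustrative invocation sketch (an assumption, not part of the installer): the
# keyword names follow the Install() docstring above; how `manifest` and
# `selected_disks` are obtained is left to the calling code (e.g. the ISO menu)
# and is only hinted at here.
#
# Install(manifest=manifest,            # a Manifest object, however the caller builds it
#         disks=selected_disks,         # list of Disk objects to format and install onto
#         efi=True,                     # use an EFI boot partition instead of bios-boot
#         password=root_password,       # root password for a fresh install
#         interactive=False)            # no bsd.Dialog status/error boxes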
def FormatDisks(disks, partitions, interactive):
    """
    Format the given disks. Either returns a handle for the pool,
    or raises an exception.
    """
    # We don't care if these commands fail
    if interactive:
        status = Dialog.MessageBox(Title(), "Partitioning drive(s)",
                                   height=7, width=40, wait=False)
        status.clear()
        status.run()

    os_partition = None
    for part in partitions:
        if part.os is True:
            if os_partition is None:
                os_partition = part.index
            else:
                if os_partition != part.index:
                    if interactive:
                        Dialog.MessageBox("Partitioning Error",
                                          "Multiple partitions are claiming to be the OS partitions. This must be due to a bug. Aborting before any formatting is done",
                                          height=10, width=45).run()
                    raise InstallationError("Multiple OS partitions")

    # This could fail for a couple of reasons, but mostly we don't care.
    try:
        for disk in disks:
            RunCommand("/sbin/gpart", "destroy", "-F", disk.name)
    except:
        pass

    try:
        os_partition = None
        for disk in disks:
            # One thing we have to worry about is gmirror, which won't
            # let us repartition if it's in use. So we need to find out
            # if the disk is in use by a mirror, and if so, we need
            # to remove the appropriate device, partition, or label from
            # the mirror. (Note that there may be more than one mapping,
            # conceivably, so what we need is a pairing of object -> mirror name.
            for (mname, pname) in Utils.FindMirrors(disk):
                try:
                    RunCommand("/sbin/gmirror remove {} {}".format(mname, pname))
                except:
                    LogIt("Unable to remove {} from mirror {}; this may cause a failure in a bit".format(pname, mname))

            RunCommand("/sbin/gpart", "create", "-s", "GPT", "-f", "active", disk.name)
            # For best purposes, the freebsd-boot partition-to-be
            # should be the last one in the list.
            for part in partitions:
                if part.os is True:
                    os_partition = part.index
                RunCommand("/sbin/gpart", "add",
                           "-t", part.type,
                           "-i", part.index,
                           "-s", part.smart_size,
                           disk.name)
                if part.type == "efi":
                    RunCommand("/sbin/newfs_msdos", "-F", "16",
                               "/dev/{}p{}".format(disk.name, part.index))

        geom.scan()
        if len(disks) > 1:
            vdev = libzfs.ZFSVdev(zfs, "mirror")
            components = []
            for disk in disks:
                tdev = libzfs.ZFSVdev(zfs, "disk")
                tdev.path = "/dev/{}p{}".format(disk.name, os_partition)
                components.append(tdev)
            vdev.children = components
        else:
            vdev = libzfs.ZFSVdev(zfs, "disk")
            vdev.path = "/dev/{}p{}".format(disks[0].name, os_partition)

        LogIt("Calling zfs.create, vdev = {}".format(vdev))
        try:
            freenas_boot = zfs.create("freenas-boot",
                                      topology={"data": [vdev]},
                                      opts={
                                          "cachefile": "/tmp/zpool.cache",
                                          "version": "28",
                                      },
                                      fsopts={
                                          "mountpoint": "none",
                                          "atime": "off",
                                          "canmount": "off",
                                      })
        except:
            LogIt("Got exception while creating boot pool", exc_info=True)
            raise
        LogIt("Created freenas-boot")

        for feature in freenas_boot.features:
            if feature.name in ["async_destroy", "empty_bpobj", "lz4_compress"]:
                feature.enable()

        LogIt("Setting compression to lz4")
        freenas_boot.root_dataset.properties["compression"].value = "lz4"
        LogIt("Creating grub dataset")
        freenas_boot.create("freenas-boot/grub", {"mountpoint": "legacy"})
        LogIt("Creating ROOT dataset")
        freenas_boot.create("freenas-boot/ROOT", {"canmount": "off"})
    except libzfs.ZFSException as e:
        LogIt("Got zfs exception {}".format(str(e)))
        if interactive:
            Dialog.MessageBox("Boot Pool Creation Failure",
                              "The {} Installer was unable to create the boot pool:\n\n\t{}".format(Project(), str(e)),
                              height=25, width=60).run()
        raise InstallationError("Unable to create boot pool")
    except RunCommandException as e:
        LogIt(str(e))
        if interactive:
            Dialog.MessageBox("Partitioning failure",
                              str("The {} Installer was unable to partition. The command:\n" +
                                  "\t{}\n" +
                                  "failed with the message:\n" +
                                  "\t{}").format(Project(), e.command, e.message),
                              height=25, width=60).run()
        raise InstallationError("Error during partitioning: \"{}\" returned \"{}\"".format(e.command, e.message))
    except Dialog.DialogEscape:
        raise
    except BaseException as e:
        LogIt("Got exception {} while partitioning".format(str(e)))
        if interactive:
            Dialog.MessageBox("Partitioning failure",
                              "The {} installer got an exception while partitioning:\n\n\t{}".format(Project(), str(e)),
                              height=25, width=60).run()
        raise InstallationError("Error during partitioning")

    return freenas_boot
def rescan(self):
    geom.scan()
    self.__init__(self._name)
def attach_to_multipath(dispatcher, disk, ds_disk, path):
    if not disk and ds_disk:
        logger.info("Device node %s <%s> is marked as multipath, creating single-node multipath", path, ds_disk['serial'])
        nodename = os.path.basename(ds_disk['path'])
        logger.info('Reusing %s path', nodename)

        # Degenerated single-disk multipath
        try:
            dispatcher.exec_and_wait_for_event(
                'system.device.attached',
                lambda args: args['path'] == '/dev/multipath/{0}'.format(nodename),
                lambda: system('/sbin/gmultipath', 'create', nodename, path)
            )
        except SubprocessException as e:
            logger.warning('Cannot create multipath: {0}'.format(e.err))
            return

        ret = {
            'is_multipath': True,
            'path': os.path.join('/dev/multipath', nodename),
        }
    elif disk:
        logger.info("Device node %s is another path to disk <%s> (%s)", path, disk['id'], disk['description'])
        if disk['is_multipath']:
            if path in disk['multipath.members']:
                # Already added
                return

            # Attach new disk
            try:
                system('/sbin/gmultipath', 'add', disk['multipath.node'], path)
            except SubprocessException as e:
                logger.warning('Cannot attach {0} to multipath: {1}'.format(path, e.err))
                return

            nodename = disk['multipath.node']
            ret = {
                'is_multipath': True,
                'path': os.path.join('/dev/multipath', disk['multipath.node']),
            }
        else:
            # Create new multipath
            logger.info('Creating new multipath device')

            # If disk was previously tied to specific cdev path (/dev/multipath[0-9]+)
            # reuse that path. Otherwise pick up first multipath device name available
            if ds_disk and ds_disk['is_multipath']:
                nodename = os.path.basename(ds_disk['path'])
                logger.info('Reusing %s path', nodename)
            else:
                nodename = get_multipath_name()
                logger.info('Using new %s path', nodename)

            try:
                dispatcher.exec_and_wait_for_event(
                    'system.device.attached',
                    lambda args: args['path'] == '/dev/multipath/{0}'.format(nodename),
                    lambda: system('/sbin/gmultipath', 'create', nodename, disk['path'], path)
                )
            except SubprocessException as e:
                logger.warning('Cannot create multipath: {0}'.format(e.err))
                return

            ret = {
                'is_multipath': True,
                'path': os.path.join('/dev/multipath', nodename),
            }

    geom.scan()
    gmultipath = geom.geom_by_name('MULTIPATH', nodename)
    ret['multipath'] = generate_multipath_info(gmultipath)
    return ret
def check_clean(self, disk):
    geom.scan()
    return geom.class_by_name('PART').xml.find(f'.//geom[name="{disk}"]') is None
def retrieve_geom_class(self, class_name):
    geom.scan()
    return geom.class_by_name(class_name)
def identifier_to_device(self, ident, disks):
    if not ident:
        return None

    search = self.RE_IDENTIFIER.search(ident)
    if not search:
        return None

    geom.scan()

    tp = search.group('type')
    # We need to escape single quotes to html entity
    value = search.group('value').replace("'", '%27')

    if tp == 'uuid':
        search = geom.class_by_name('PART').xml.find(
            f'.//config[rawuuid = "{value}"]/../../name')
        if search is not None and not search.text.startswith('label'):
            return search.text
    elif tp == 'label':
        search = geom.class_by_name('LABEL').xml.find(
            f'.//provider[name = "{value}"]/../name')
        if search is not None:
            return search.text
    elif tp == 'serial':
        search = geom.class_by_name('DISK').xml.find(
            f'.//provider/config[ident = "{value}"]/../../name')
        if search is not None:
            return search.text
        # Builtin xml xpath do not understand normalize-space
        search = etree.fromstring(ElementTree.tostring(geom.class_by_name('DISK').xml))
        search = search.xpath(
            './/provider/config['
            f'normalize-space(ident) = normalize-space("{value}")'
            ']/../../name'
        )
        if len(search) > 0:
            return search[0].text
        disks = self.middleware.call_sync('disk.query', [('serial', '=', value)])
        if disks:
            return disks[0]['name']
    elif tp == 'serial_lunid':
        # Builtin xml xpath do not understand concat
        search = etree.fromstring(ElementTree.tostring(geom.class_by_name('DISK').xml))
        search = search.xpath(
            f'.//provider/config[concat(ident,"_",lunid) = "{value}"]/../../name'
        )
        if len(search) > 0:
            return search[0].text
    elif tp == 'devicename':
        if os.path.exists(f'/dev/{value}'):
            return value
    else:
        raise NotImplementedError(f'Unknown type {tp!r}')
def retrieve_disk_geom_class(self):
    geom.scan()
    return geom.class_by_name('DISK')