def run(self, pool, new_vdevs, updated_vdevs):
    """
    Extend pool `pool` with `new_vdevs` and/or attach replacement disks
    to `updated_vdevs`, then wait for the resulting resilver to finish.

    :param pool: name of the zpool to operate on.
    :param new_vdevs: topology description of brand-new vdevs to add
        (converted via convert_topology).
    :param updated_vdevs: list of dicts with 'target_guid' (GUID of an
        existing vdev) and 'vdev' ({'type', 'path'}) describing the disk
        to attach to it.
    :raises TaskException: ENOENT when a target GUID cannot be found;
        EFAULT wrapping any underlying libzfs failure.
    """
    try:
        self.pool = pool
        zfs = libzfs.ZFS()
        pool = zfs.get(pool)

        if new_vdevs:
            nvroot = convert_topology(zfs, new_vdevs)
            pool.attach_vdevs(nvroot)

        if updated_vdevs:
            for i in updated_vdevs:
                # int() replaces the Python 2-only long() builtin;
                # Python 3 ints are arbitrary precision anyway.
                vdev = pool.vdev_by_guid(int(i['target_guid']))
                if not vdev:
                    raise TaskException(
                        errno.ENOENT,
                        'Vdev with GUID {0} not found'.format(
                            i['target_guid']))

                new_vdev = libzfs.ZFSVdev(zfs, i['vdev']['type'])
                new_vdev.path = i['vdev']['path']
                vdev.attach(new_vdev)

            # Wait for resilvering process to complete
            self.started = True
            self.dispatcher.test_or_wait_for_event(
                'fs.zfs.resilver.finished',
                lambda args: args['guid'] == str(pool.guid),
                lambda: pool.scrub.state == libzfs.ScanState.SCANNING and
                    pool.scrub.function == libzfs.ScanFunction.RESILVER)
    # 'except X, err' is Python 2-only syntax (SyntaxError on Python 3);
    # 'as err' is valid on Python 2.6+ and 3.
    except libzfs.ZFSException as err:
        raise TaskException(errno.EFAULT, str(err))
def extend(self, job, name, new=None, existing=None):
    """
    Extend a zfs pool `name` with `new` vdevs or attach to `existing` vdevs.

    :param job: middleware job handle (kept for interface compatibility).
    :param name: name of the pool to extend.
    :param new: topology of brand-new vdevs — not implemented yet.
    :param existing: list of dicts {'target': vdev label, 'type': vdev
        type, 'path': device path} describing disks to attach.
    :raises CallError: EINVAL when no vdevs are given or a target cannot
        be found; otherwise wraps the libzfs error and its code.
    """
    if new is None and existing is None:
        raise CallError('New or existing vdevs must be provided', errno.EINVAL)

    if new:
        raise CallError('Adding new vdev is not implemented yet')

    try:
        zfs = libzfs.ZFS()
        pool = zfs.get(name)

        # Make sure we can find all target vdev before attaching anything,
        # so a bad entry fails the whole request up front.
        for i in (existing or []):
            target = find_vdev(pool, i['target'])
            if target is None:
                # BUG FIX: the message interpolated `target`, which is
                # always None on this branch; report the label we actually
                # searched for (matches the newer version of this method).
                raise CallError(
                    f"Failed to find vdev for {i['target']}", errno.EINVAL)
            i['target'] = target

        for i in (existing or []):
            newvdev = libzfs.ZFSVdev(zfs, i['type'].lower())
            newvdev.path = i['path']
            i['target'].attach(newvdev)
    except libzfs.ZFSException as e:
        raise CallError(str(e), e.code)
def extend(self, job, name, new, existing):
    """
    Extend a zfs pool `name` with `new` vdevs or attach to `existing` vdevs.
    """
    if new is None and existing is None:
        raise CallError('New or existing vdevs must be provided', errno.EINVAL)

    try:
        with libzfs.ZFS() as zfs:
            pool = zfs.get(name)

            if new:
                topology = convert_topology(zfs, new)
                pool.attach_vdevs(topology)

            entries = existing or []

            # Resolve every requested target up front so nothing is
            # attached unless all of them can be found.
            for entry in entries:
                resolved = find_vdev(pool, entry['target'])
                if resolved is None:
                    raise CallError(
                        f"Failed to find vdev for {entry['target']}",
                        errno.EINVAL)
                entry['target'] = resolved

            # All targets resolved — perform the attachments.
            for entry in entries:
                replacement = libzfs.ZFSVdev(zfs, entry['type'].lower())
                replacement.path = entry['path']
                entry['target'].attach(replacement)
    except libzfs.ZFSException as e:
        raise CallError(str(e), e.code)
def convert_topology(zfs, vdevs):
    """
    Translate a flat list of vdev descriptions into a topology mapping
    of root-group name -> list of libzfs.ZFSVdev objects.

    Each entry in `vdevs` supplies 'devices' (paths), 'type' and 'root';
    STRIPE members go directly into the group, anything else is wrapped
    in a parent vdev of the given type.
    """
    topology = defaultdict(list)
    for spec in vdevs:
        # Build one leaf vdev per device path.
        members = []
        for device in spec['devices']:
            leaf = libzfs.ZFSVdev(zfs, 'disk')
            leaf.type = 'disk'
            leaf.path = device
            members.append(leaf)

        group = spec['root'].lower()
        if spec['type'] == 'STRIPE':
            # Striped disks join the group without a grouping parent.
            topology[group].extend(members)
        else:
            parent = libzfs.ZFSVdev(zfs, 'disk')
            parent.type = spec['type'].lower()
            parent.children = members
            topology[group].append(parent)
    return topology
def run(self, pool, guid, vdev):
    """
    Replace the vdev identified by `guid` in pool `pool` with the disk
    described by `vdev`.

    :param pool: name of the zpool.
    :param guid: GUID of the vdev to be replaced.
    :param vdev: dict {'type', 'path'} describing the replacement disk.
    :raises TaskException: ENOENT when the GUID cannot be found; EFAULT
        wrapping any underlying libzfs failure.
    """
    try:
        zfs = libzfs.ZFS()
        pool = zfs.get(pool)
        # int() replaces the Python 2-only long() builtin.
        ovdev = pool.vdev_by_guid(int(guid))
        # BUG FIX: the original tested `vdev` (the replacement spec, which
        # is always truthy here) instead of `ovdev`, so a missing GUID was
        # never reported and ovdev.replace() failed on None instead.
        if not ovdev:
            raise TaskException(
                errno.ENOENT,
                'Vdev with GUID {0} not found'.format(guid))

        new_vdev = libzfs.ZFSVdev(zfs, vdev['type'])
        new_vdev.path = vdev['path']
        ovdev.replace(new_vdev)
    # Python 3-compatible 'as' syntax (the original 'except X, err' form
    # is a SyntaxError on Python 3).
    except libzfs.ZFSException as err:
        raise TaskException(errno.EFAULT, str(err))
def convert_topology(zfs, topology):
    """
    Build an nvroot mapping (group name -> list of libzfs.ZFSVdev) from
    a topology dictionary of plain vdev descriptions.

    Each description carries 'type', optionally 'path' (for type 'disk')
    and optionally 'children' (list of {'type', 'path'} leaf specs).
    """
    nvroot = {}
    for group, specs in topology.items():
        converted = []
        for spec in specs:
            vdev = libzfs.ZFSVdev(zfs, "disk")
            vdev.type = spec['type']
            # Only leaf disks carry a device path directly.
            if spec['type'] == 'disk':
                vdev.path = spec['path']
            if 'children' in spec:
                kids = []
                for child in spec['children']:
                    cvdev = libzfs.ZFSVdev(zfs, "disk")
                    cvdev.type = child['type']
                    cvdev.path = child['path']
                    kids.append(cvdev)
                vdev.children = kids
            converted.append(vdev)
        nvroot[group] = converted
    return nvroot
def replace(self, name, label, dev):
    """
    Replace device `label` with `dev` in pool `name`.
    """
    try:
        zfs = libzfs.ZFS()
        pool = zfs.get(name)

        # Locate the vdev carrying the old device.
        target = find_vdev(pool, label)
        if target is None:
            raise CallError(f'Failed to find vdev for {label}', errno.EINVAL)

        # Wrap the new device in a disk vdev and swap it in.
        replacement = libzfs.ZFSVdev(zfs, 'disk')
        replacement.path = f'/dev/{dev}'
        target.replace(replacement)
    except libzfs.ZFSException as e:
        raise CallError(str(e), e.code)
def replace(self, name, label, dev):
    """
    Replace device `label` with `dev` in pool `name`.
    """
    try:
        with libzfs.ZFS() as zfs:
            pool = zfs.get(name)

            # Locate the vdev carrying the old device.
            target = find_vdev(pool, label)
            if target is None:
                raise CallError(f'Failed to find vdev for {label}',
                                errno.EINVAL)

            # Wrap the new device in a disk vdev.
            replacement = libzfs.ZFSVdev(zfs, 'disk')
            replacement.path = f'/dev/{dev}'

            # FIXME: Replace using old path is not working for some reason
            # Lets use guid for now.
            target.path = str(target.guid)
            target.replace(replacement)
    except libzfs.ZFSException as e:
        raise CallError(str(e), e.code)
def FormatDisks(disks, partitions, interactive):
    """
    Format the given disks.  Either returns a handle for the pool, or
    raises an exception.

    Destroys any existing partition tables on `disks`, creates GPT
    layouts from `partitions`, then builds the "freenas-boot" zpool on
    the OS partition(s) (mirrored when more than one disk is given).

    NOTE(review): relies on module-level handles `zfs` and `geom` —
    confirm they are initialized before this function is called.
    """
    # We don't care if these commands fail
    if interactive:
        # wait=False: show a progress box without blocking for input.
        status = Dialog.MessageBox(Title(), "Partitioning drive(s)",
                                   height=7, width=40, wait=False)
        status.clear()
        status.run()
    # Sanity check: every partition flagged as the OS partition must
    # agree on the same index, otherwise abort before touching disks.
    os_partition = None
    for part in partitions:
        if part.os is True:
            if os_partition is None:
                os_partition = part.index
            else:
                if os_partition != part.index:
                    if interactive:
                        Dialog.MessageBox("Partitioning Error",
                                          "Multiple partitions are claiming to be the OS partitions. This must be due to a bug. Aborting before any formatting is done",
                                          height=10, width=45).run()
                    raise InstallationError("Multiple OS partitions")
    # This could fail for a couple of reasons, but mostly we don't care.
    try:
        for disk in disks:
            RunCommand("/sbin/gpart", "destroy", "-F", disk.name)
    except:
        # NOTE(review): bare except is a deliberate best-effort — e.g.
        # the disk may simply have no partition table to destroy.
        pass
    try:
        os_partition = None
        for disk in disks:
            # One thing we have to worry about is gmirror, which won't
            # let us repartition if it's in use.  So we need to find out
            # if the disk is in use by a mirror, and if so, we need
            # to remove the appropriate device, partition, or label from
            # the mirror.  (Note that there may be more than one mapping,
            # conceivably, so what we need is a pairing of
            # object -> mirror name.)
            for (mname, pname) in Utils.FindMirrors(disk):
                try:
                    RunCommand("/sbin/gmirror remove {} {}".format(mname, pname))
                except:
                    # Best-effort again: log and press on; a later gpart
                    # call will fail loudly if the mirror is still busy.
                    LogIt("Unable to remove {} from mirror {}; this may cause a failure in a bit".format(pname, mname))
            RunCommand("/sbin/gpart", "create", "-s", "GPT", "-f", "active", disk.name)
            # For best purposes, the freebsd-boot partition-to-be
            # should be the last one in the list.
            for part in partitions:
                if part.os is True:
                    # Remember which partition index carries the OS; used
                    # below to build the pool vdev paths.
                    os_partition = part.index
                RunCommand("/sbin/gpart", "add",
                           "-t", part.type,
                           "-i", part.index,
                           "-s", part.smart_size,
                           disk.name)
                if part.type == "efi":
                    # EFI system partition must carry a FAT filesystem.
                    RunCommand("/sbin/newfs_msdos", "-F", "16",
                               "/dev/{}p{}".format(disk.name, part.index))
        # Re-scan so geom sees the partitions we just created.
        geom.scan()
        if len(disks) > 1:
            # Multiple target disks: mirror the OS partitions.
            vdev = libzfs.ZFSVdev(zfs, "mirror")
            components = []
            for disk in disks:
                tdev = libzfs.ZFSVdev(zfs, "disk")
                tdev.path = "/dev/{}p{}".format(disk.name, os_partition)
                components.append(tdev)
            vdev.children = components
        else:
            # Single disk: plain disk vdev on the OS partition.
            vdev = libzfs.ZFSVdev(zfs, "disk")
            vdev.path = "/dev/{}p{}".format(disks[0].name, os_partition)
        LogIt("Calling zfs.create, vdev = {}".format(vdev))
        try:
            # version 28 + cachefile keep the pool importable by the
            # boot loader environment.
            freenas_boot = zfs.create("freenas-boot",
                                      topology={"data": [vdev]},
                                      opts={
                                          "cachefile": "/tmp/zpool.cache",
                                          "version": "28",
                                      }, fsopts={
                                          "mountpoint": "none",
                                          "atime": "off",
                                          "canmount": "off",
                                      })
        except:
            # Log with traceback, then let the outer handlers classify it.
            LogIt("Got exception while creating boot pool", exc_info=True)
            raise
        LogIt("Created freenas-boot")
        # Enable only the features the boot loader can cope with.
        for feature in freenas_boot.features:
            if feature.name in ["async_destroy", "empty_bpobj", "lz4_compress"]:
                feature.enable()
        LogIt("Setting compression to lz4")
        freenas_boot.root_dataset.properties["compression"].value = "lz4"
        LogIt("Creating grub dataset")
        freenas_boot.create("freenas-boot/grub", {"mountpoint": "legacy"})
        LogIt("Creating ROOT dataset")
        freenas_boot.create("freenas-boot/ROOT", {"canmount": "off"})
    except libzfs.ZFSException as e:
        LogIt("Got zfs exception {}".format(str(e)))
        if interactive:
            Dialog.MessageBox("Boot Pool Creation Failure",
                              "The {} Installer was unable to create the boot pool:\n\n\t{}".format(Project(), str(e)),
                              height=25, width=60).run()
        raise InstallationError("Unable to create boot pool")
    except RunCommandException as e:
        LogIt(str(e))
        if interactive:
            Dialog.MessageBox("Partitioning failure",
                              str("The {} Installer was unable to partition. The command:\n" +
                                  "\t{}\n" +
                                  "failed with the message:\n" +
                                  "\t{}").format(Project(), e.command, e.message),
                              height=25, width=60).run()
        raise InstallationError("Error during partitioning: \"{}\" returned \"{}\"".format(e.command, e.message))
    except Dialog.DialogEscape:
        # User escaped a dialog: propagate unchanged so the caller can
        # treat it as a cancellation rather than an installation error.
        raise
    except BaseException as e:
        LogIt("Got exception {} while partitioning".format(str(e)))
        if interactive:
            Dialog.MessageBox("Partitioning failure",
                              "The {} installer got an exception while partitioning:\n\n\t{}".format(Project(), str(e)),
                              height=25, width=60).run()
        raise InstallationError("Error during partitioning")
    return freenas_boot