def destroy_upload_location(self):
    """Tear down the md(4)-backed temporary upload filesystem, if present.

    Resolves the ``label/<UPLOAD_LABEL>`` glabel back to its backing md
    device, unmounts the filesystem and destroys the memory device.  A
    no-op when the label or md device cannot be found.

    Raises:
        CallError: if the memory device could not be destroyed.
    """
    geom.scan()
    klass_label = geom.class_by_name('LABEL')
    # The LABEL geom's consumer element references the provider that
    # backs the label (the md device's provider).
    prov = klass_label.xml.find(
        f'.//provider[name = "label/{UPLOAD_LABEL}"]/../consumer/provider')
    if prov is None:
        return
    klass_md = geom.class_by_name('MD')
    # Resolve the provider reference to the md device name (e.g. "md0").
    prov = klass_md.xml.find(
        f'.//provider[@id = "{prov.attrib["ref"]}"]/name')
    if prov is None:
        return
    mddev = prov.text
    # Best-effort unmount; the filesystem may not be mounted (check=False).
    subprocess.run(
        ['umount', f'/dev/label/{UPLOAD_LABEL}'],
        capture_output=True,
        check=False,
    )
    cp = subprocess.run(
        ['mdconfig', '-d', '-u', mddev],
        text=True,
        capture_output=True,
        check=False,
    )
    if cp.returncode != 0:
        raise CallError(f'Could not destroy memory device: {cp.stderr}')
def get_encrypted(self, options):
    """
    Get all geli providers

    It might be an entire disk or a partition of type freebsd-zfs.

    Before a geli encrypted pool can be imported, disks used in the pool should be decrypted
    and then pool import can proceed as desired. In that case `unused` can be passed as `true`,
    to find out which disks are geli encrypted but not being used by active ZFS pools.
    """
    providers = []
    disks_blacklist = []
    if options['unused']:
        # Exclude disks already reserved (boot pool, swap, in-use pools).
        disks_blacklist += self.middleware.call_sync('disk.get_reserved')
    geom.scan()
    klass_part = geom.class_by_name('PART')
    klass_label = geom.class_by_name('LABEL')
    if not klass_part:
        return providers
    for g in klass_part.geoms:
        for p in g.providers:
            # Some providers carry no config; skip them before subscripting.
            if p.config is None:
                continue
            if p.config['type'] != 'freebsd-zfs':
                continue
            disk = p.geom.consumer.provider.name
            if disk in disks_blacklist:
                continue
            try:
                # `geli dump` exits non-zero unless geli metadata exists,
                # so success identifies an encrypted provider.
                subprocess.run(
                    ['geli', 'dump', p.name],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                    check=True,
                )
            except subprocess.CalledProcessError:
                continue
            dev = None
            if klass_label:
                # Prefer the glabel device name over the raw partition name.
                for g in klass_label.geoms:
                    if g.name == p.name:
                        dev = g.provider.name
                        break
            if dev is None:
                dev = p.name
            providers.append({
                'name': p.name,
                'dev': dev,
                'disk': disk
            })
    return providers
def get_encrypted(self, options):
    """
    Get all geli providers

    It might be an entire disk or a partition of type freebsd-zfs

    With `options['unused']` set, disks reserved by the system are
    excluded from the result.
    """
    providers = []
    disks_blacklist = []
    if options['unused']:
        disks_blacklist += self.middleware.call_sync('disk.get_reserved')
    geom.scan()
    klass_part = geom.class_by_name('PART')
    klass_label = geom.class_by_name('LABEL')
    if not klass_part:
        return providers
    for g in klass_part.geoms:
        for p in g.providers:
            # Fix: a PART provider may expose no config at all, in which
            # case p.config is None and subscripting it raises TypeError.
            # The sibling implementation of this method carries this guard.
            if p.config is None:
                continue
            if p.config['type'] != 'freebsd-zfs':
                continue
            disk = p.geom.consumer.provider.name
            if disk in disks_blacklist:
                continue
            try:
                # `geli dump` succeeds only when geli metadata is present.
                subprocess.run(
                    ['geli', 'dump', p.name],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                    check=True,
                )
            except subprocess.CalledProcessError:
                continue
            dev = None
            if klass_label:
                # Prefer the glabel device name when one exists.
                for g in klass_label.geoms:
                    if g.name == p.name:
                        dev = g.provider.name
                        break
            if dev is None:
                dev = p.name
            providers.append({
                'name': p.name,
                'dev': dev,
                'disk': disk
            })
    return providers
def get_encrypted(self, options):
    """
    Get all geli providers

    It might be an entire disk or a partition of type freebsd-zfs

    With `options['unused']` set, disks reserved by the system are
    excluded from the result.
    """
    providers = []
    disks_blacklist = []
    if options['unused']:
        disks_blacklist += self.middleware.call_sync('disk.get_reserved')
    geom.scan()
    klass_part = geom.class_by_name('PART')
    klass_label = geom.class_by_name('LABEL')
    if not klass_part:
        return providers
    for g in klass_part.geoms:
        for p in g.providers:
            # Fix: a PART provider may expose no config at all, in which
            # case p.config is None and subscripting it raises TypeError.
            # The sibling implementation of this method carries this guard.
            if p.config is None:
                continue
            if p.config['type'] != 'freebsd-zfs':
                continue
            disk = p.geom.consumer.provider.name
            if disk in disks_blacklist:
                continue
            try:
                # `geli dump` succeeds only when geli metadata is present.
                subprocess.run(
                    ['geli', 'dump', p.name],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                    check=True,
                )
            except subprocess.CalledProcessError:
                continue
            dev = None
            if klass_label:
                # Prefer the glabel device name when one exists.
                for g in klass_label.geoms:
                    if g.name == p.name:
                        dev = g.provider.name
                        break
            if dev is None:
                dev = p.name
            providers.append({'name': p.name, 'dev': dev, 'disk': disk})
    return providers
def list_partitions(self, disk):
    """Return partition entries for `disk` from the PART geom class.

    Each entry contains: name, size (bytes or None), partition_type
    (a UUID string or 'UNKNOWN'), disk, geom provider id, /dev path and
    encrypted_provider (the '.eli' device path when one exists).
    """
    geom.scan()
    klass = geom.class_by_name('PART')
    parts = []
    for g in klass.xml.findall(f'./geom[name=\'{disk}\']'):
        for p in g.findall('./provider'):
            size = p.find('./mediasize')
            if size is not None:
                try:
                    size = int(size.text)
                except ValueError:
                    size = None
            name = p.find('./name')
            part_type = p.find('./config/type')
            if part_type is not None:
                # Translate the human-readable type (e.g. freebsd-zfs)
                # into its GPT UUID.
                part_type = self.middleware.call_sync(
                    'disk.get_partition_uuid_from_name', part_type.text)
            if not part_type:
                part_type = 'UNKNOWN'
            part = {
                'name': name.text,
                'size': size,
                'partition_type': part_type,
                'disk': disk,
                'id': p.get('id'),
                'path': os.path.join('/dev', name.text),
                'encrypted_provider': None,
            }
            # A sibling <name>.eli device means the partition is geli-backed.
            if os.path.exists(f'{part["path"]}.eli'):
                part['encrypted_provider'] = f'{part["path"]}.eli'
            parts.append(part)
    return parts
def get_disks(self, name):
    """Yield the names of physical disks backing the zpool `name`.

    Raises:
        CallError: with errno.ENOENT when the pool does not exist.
    """
    try:
        with libzfs.ZFS() as zfs:
            disks = list(zfs.get(name).disks)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)
    geom.scan()
    labelclass = geom.class_by_name('LABEL')
    for absdev in disks:
        # Normalize "/dev/..." paths and geli ".eli" suffixes to a bare name.
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        find = labelclass.xml.findall(
            f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            # Device is a label: resolve the consumed provider to its geom.
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        # Only yield names that are real DISK geoms.
        if name and geom.geom_by_name('DISK', name):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
async def attach(self, job, dev, options=None):
    """
    Attach a disk to the boot pool, turning a stripe into a mirror.

    `expand` option will determine whether the new disk partition will be
    the maximum available or the same size as the current disk.
    """
    disks = [d async for d in await self.get_disks()]
    if len(disks) > 1:
        raise CallError('3-way mirror not supported yet')
    format_opts = {}
    if not options['expand']:
        # Lets try to find out the size of the current freebsd-zfs partition so
        # the new partition is not bigger, preventing size mismatch if one of
        # them fail later on. See #21336
        await self.middleware.run_in_thread(geom.scan)
        labelclass = geom.class_by_name('PART')
        for e in labelclass.xml.findall(
            f"./geom[name='{disks[0]}']/provider/config[type='freebsd-zfs']"
        ):
            format_opts['size'] = int(e.find('./length').text)
            break
    boottype = await self.format(dev, format_opts)
    pool = await self.middleware.call("zfs.pool.query", [["name", "=", "freenas-boot"]], {"get": True})
    # Attach the new p2 partition to the existing data vdev, forming a mirror.
    extend_pool_job = await self.middleware.call(
        'zfs.pool.extend', 'freenas-boot', None,
        [{'target': pool["groups"]["data"][0]["guid"], 'type': 'DISK', 'path': f'/dev/{dev}p2'}]
    )
    # Install the boot loader before waiting for the resilver job.
    await self.install_loader(boottype, dev)
    await job.wrap(extend_pool_job)
async def device_to_identifier(self, name, disks):
    """Return a unique '{type}value' identifier string for device `name`.

    `disks` is a mapping of device name -> disk data, consulted first to
    avoid a 'device.get_disk' round trip.  Identifier preference order:
    serial_lunid, serial, uuid (ZFS partition rawuuid), label,
    devicename; returns '' when nothing matches.
    """
    # Fix: look the device up by its name.  The previous code used the
    # literal key 'name' (disks.get('name')), so the mapping was never
    # consulted and every call fell through to 'device.get_disk'.
    disk_data = disks.get(name) or await self.middleware.call(
        'device.get_disk', name)
    if disk_data and disk_data['serial_lunid']:
        return f'{{serial_lunid}}{disk_data["serial_lunid"]}'
    elif disk_data and disk_data['serial']:
        return f'{{serial}}{disk_data["serial"]}'
    await self.middleware.run_in_thread(geom.scan)
    klass = geom.class_by_name('PART')
    if klass:
        for g in filter(lambda v: v.name == name, klass.geoms):
            for p in g.providers:
                # Providers without config cannot be ZFS partitions.
                if p.config is None:
                    continue
                if p.config['rawtype'] in await self.middleware.call(
                        'disk.get_valid_zfs_partition_type_uuids'):
                    return f'{{uuid}}{p.config["rawuuid"]}'
    g = geom.geom_by_name('LABEL', name)
    if g:
        return f'{{label}}{g.provider.name}'
    g = geom.geom_by_name('DEV', name)
    if g:
        return f'{{devicename}}{name}'
    return ''
async def get_disks(self, name):
    """Yield the names of physical disks backing the zpool `name`.

    Raises:
        CallError: with errno.ENOENT when the pool does not exist.
    """
    # Fix: use the ZFS handle as a context manager so it is closed
    # promptly instead of leaking until garbage collection; matches the
    # sibling implementation of this method.
    try:
        with libzfs.ZFS() as zfs:
            disks = list(zfs.get(name).disks)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)
    await self.middleware.run_in_thread(geom.scan)
    labelclass = geom.class_by_name('LABEL')
    for absdev in disks:
        # Normalize "/dev/..." paths and geli ".eli" suffixes.
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        find = labelclass.xml.findall(
            f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            # Device is a label: resolve the consumed provider to its geom.
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        if name and geom.geom_by_name('DISK', name):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
def gptid_from_part_type(self, disk, part_type):
    """Return the 'gptid/<rawuuid>' device name of the partition of
    type `part_type` on `disk`.

    Raises:
        ValueError: if no partition of that type exists on the disk.
    """
    geom.scan()
    part_class = geom.class_by_name('PART')
    node = part_class.xml.find(
        f'.//geom[name="{disk}"]//config/[type="{part_type}"]/rawuuid')
    if node is None:
        raise ValueError(f'Partition type {part_type} not found on {disk}')
    return f'gptid/{node.text}'
def label_to_disk(self, label, geom_scan=True):
    """Resolve a label (or device name) to the name of its parent disk.

    Returns None when the resolved device is not a PART provider.
    Set geom_scan=False when the caller has already refreshed geom state.
    """
    if geom_scan:
        geom.scan()
    # First resolve the label to its backing device; fall back to the
    # label itself when it is already a device name.
    dev = self.label_to_dev(label, geom_scan=False) or label
    # The provider's parent geom name is the disk holding the partition.
    part = geom.class_by_name('PART').xml.find(
        f'.//provider[name="{dev}"]/../name')
    if part is not None:
        return part.text
def get_swap_size(self, disk):
    """Return the size in bytes of `disk`'s freebsd-swap partition,
    or None when the disk has no swap partition."""
    geom.scan()
    part_class = geom.class_by_name('PART')
    node = part_class.xml.find(
        f".//geom[name='{disk}']/provider/config[type='freebsd-swap']/length"
    )
    return int(node.text) if node is not None else None
async def remove_disk_from_graid(self, dev):
    """Detach `dev` from any graid array it belongs to (best-effort)."""
    # Its possible a disk was previously used by graid so we need to make sure to
    # remove the disk from it (#40560)
    gdisk = geom.class_by_name('DISK')
    graid = geom.class_by_name('RAID')
    if gdisk and graid:
        prov = gdisk.xml.find(f'.//provider[name = "{dev}"]')
        if prov is not None:
            provid = prov.attrib.get('id')
            # Find the RAID geom consuming this disk provider, if any.
            graid = graid.xml.find(
                f'.//consumer/provider[@ref = "{provid}"]/../../name')
            if graid is not None:
                # Failure is logged, not raised: removal is best-effort.
                cp = await run('graid', 'remove', graid.text, dev, check=False)
                if cp.returncode != 0:
                    self.logger.debug(
                        'Failed to remove %s from %s: %s',
                        dev, graid.text, cp.stderr.decode())
def label_to_dev(self, label, geom_scan=True):
    """Resolve a geom label (optionally carrying a '.nop' or '.eli'
    suffix) to the name of the device providing it, or None."""
    # Drop at most one transparency suffix before the lookup.
    for suffix in ('.nop', '.eli'):
        if label.endswith(suffix):
            label = label[:-4]
            break
    if geom_scan:
        geom.scan()
    label_class = geom.class_by_name('LABEL')
    node = label_class.xml.find(f'.//provider[name="{label}"]/../name')
    if node is not None:
        return node.text
def _get_class_disk(self):
    """Return every DISK geom as a dict with path, name, mediasize and
    description (the provider's 'descr' config value)."""
    geom.scan()
    return [
        {
            "path": os.path.join("/dev", g.name),
            "name": g.name,
            "mediasize": g.provider.mediasize,
            "description": g.provider.config['descr'],
        }
        for g in geom.class_by_name('DISK').geoms
    ]
def clean_multipaths(dispatcher):
    """Destroy every MULTIPATH geom, waiting for each detach event."""
    global multipaths
    geom.scan()
    cls = geom.class_by_name('MULTIPATH')
    if cls:
        for i in cls.geoms:
            logger.info('Destroying multipath device %s', i.name)
            # Run the destroy and block until the matching device-detached
            # event for this multipath path is observed.
            dispatcher.exec_and_wait_for_event(
                'system.device.detached',
                lambda args: args[
                    'path'] == '/dev/multipath/{0}'.format(i.name),
                lambda: system('/sbin/gmultipath', 'destroy', i.name))
    # NOTE(review): presumably -1 marks the cached multipath state as
    # stale so it gets recomputed — confirm against the module that
    # reads `multipaths`.
    multipaths = -1
def FindMirrors(disk):
    """
    Yield (mirror_name, provider_name) for every gmirror that consumes
    the given disk.

    gmirror is stubborn, and we want to find any mirrors that use the
    given disk.  `disk` is the name, e.g. ada0, not a path or partition.
    XXX: Other classes are probably just as stubborn!
    """
    mirror_class = geom.class_by_name("MIRROR")
    if not mirror_class:
        return
    for m in mirror_class.geoms:
        for consumer in m.consumers:
            # Compare against the real (underlying) disk name of the
            # consumed provider's geom.
            if DiskRealName(consumer.provider.geom) == disk:
                yield (m.name, consumer.provider.name)
async def swaps_remove_disks(self, disks):
    """
    Remove a given disk (e.g. ["da0", "da1"]) from swap.
    it will offline if from swap, remove it from the gmirror (if exists)
    and detach the geli.
    """
    await self.middleware.run_in_thread(geom.scan)
    providers = {}
    for disk in disks:
        partgeom = geom.geom_by_name('PART', disk)
        if not partgeom:
            continue
        for p in partgeom.providers:
            # freebsd-swap GPT rawtype UUID
            if p.config[
                'rawtype'] == '516e7cb5-6ecf-11d6-8ff8-00022d09712b':
                providers[p.id] = p
                break
    if not providers:
        return
    klass = geom.class_by_name('MIRROR')
    if not klass:
        return
    # Collect the swap mirrors containing any of the target partitions;
    # matched providers are removed from `providers` so only standalone
    # (non-mirrored) swap partitions remain afterwards.
    mirrors = set()
    for g in klass.geoms:
        for c in g.consumers:
            if c.provider.id in providers:
                mirrors.add(g.name)
                del providers[c.provider.id]
    swapinfo_devs = [s.devname for s in getswapinfo()]
    for name in mirrors:
        devname = f'mirror/{name}.eli'
        devpath = f'/dev/{devname}'
        if devname in swapinfo_devs:
            await run('swapoff', devpath)
        if os.path.exists(devpath):
            await run('geli', 'detach', devname)
        await run('gmirror', 'destroy', name)
    # Standalone swap partitions: swapoff and geli detach directly.
    for p in providers.values():
        devname = f'{p.name}.eli'
        if devname in swapinfo_devs:
            await run('swapoff', f'/dev/{devname}')
        if os.path.exists(f'/dev/{devname}'):
            await run('geli', 'detach', devname)
def clean_multipaths(dispatcher):
    """Destroy every MULTIPATH geom, waiting for each detach event."""
    global multipaths
    geom.scan()
    cls = geom.class_by_name('MULTIPATH')
    if cls:
        for i in cls.geoms:
            logger.info('Destroying multipath device %s', i.name)
            # Run the destroy and block until the matching device-detached
            # event for this multipath path is observed.
            dispatcher.exec_and_wait_for_event(
                'system.device.detached',
                lambda args: args['path'] == '/dev/multipath/{0}'.format(i.name),
                lambda: system('/sbin/gmultipath', 'destroy', i.name)
            )
    # NOTE(review): presumably -1 marks the cached multipath state as
    # stale so it gets recomputed — confirm against the module that
    # reads `multipaths`.
    multipaths = -1
async def __get_boot_type_freebsd(self):
    """Return 'EFI', 'BIOS' or None based on boot-disk partitioning.

    BIOS wins whenever any boot disk carries a freebsd-boot partition;
    None when neither an 'efi' nor a 'freebsd-boot' partition is found.
    """
    await self.middleware.run_in_thread(geom.scan)
    labelclass = geom.class_by_name('PART')
    efi = bios = 0
    for disk in await self.get_disks():
        for e in labelclass.xml.findall(f".//geom[name='{disk}']/provider/config/type"):
            if e.text == 'efi':
                efi += 1
            elif e.text == 'freebsd-boot':
                bios += 1
    if efi == 0 and bios == 0:
        return None
    # Any BIOS-partitioned disk forces BIOS boot for the whole set.
    if bios > 0:
        return 'BIOS'
    return 'EFI'
async def swaps_remove_disks(self, disks):
    """
    Remove a given disk (e.g. ["da0", "da1"]) from swap.
    it will offline if from swap, remove it from the gmirror (if exists)
    and detach the geli.
    """
    await self.middleware.run_in_thread(geom.scan)
    providers = {}
    for disk in disks:
        partgeom = geom.geom_by_name('PART', disk)
        if not partgeom:
            continue
        for p in partgeom.providers:
            # freebsd-swap GPT rawtype UUID
            if p.config['rawtype'] == '516e7cb5-6ecf-11d6-8ff8-00022d09712b':
                providers[p.id] = p
                break
    if not providers:
        return
    klass = geom.class_by_name('MIRROR')
    if not klass:
        return
    # Collect the swap mirrors containing any of the target partitions;
    # matched providers are removed from `providers` so only standalone
    # (non-mirrored) swap partitions remain afterwards.
    mirrors = set()
    for g in klass.geoms:
        for c in g.consumers:
            if c.provider.id in providers:
                mirrors.add(g.name)
                del providers[c.provider.id]
    swapinfo_devs = [s.devname for s in getswapinfo()]
    for name in mirrors:
        devname = f'mirror/{name}.eli'
        devpath = f'/dev/{devname}'
        if devname in swapinfo_devs:
            await run('swapoff', devpath)
        if os.path.exists(devpath):
            await run('geli', 'detach', devname)
        await run('gmirror', 'destroy', name)
    # Standalone swap partitions: swapoff and geli detach directly.
    for p in providers.values():
        devname = f'{p.name}.eli'
        if devname in swapinfo_devs:
            await run('swapoff', f'/dev/{devname}')
        if os.path.exists(f'/dev/{devname}'):
            await run('geli', 'detach', devname)
async def device_to_identifier(self, name):
    """
    Given a device `name` (e.g. da0) returns an unique identifier string
    for this device.
    This identifier is in the form of {type}string, "type" can be one of
    the following:
      - serial_lunid - for disk serial concatenated with the lunid
      - serial - disk serial
      - uuid - uuid of a ZFS GPT partition
      - label - label name from geom label
      - devicename - name of the device if any other could not be used/found

    Returns:
        str - identifier
    """
    await self.middleware.threaded(geom.scan)
    g = geom.geom_by_name('DISK', name)
    if g and g.provider.config.get('ident'):
        serial = g.provider.config['ident']
        lunid = g.provider.config.get('lunid')
        if lunid:
            return f'{{serial_lunid}}{serial}_{lunid}'
        return f'{{serial}}{serial}'
    serial = await self.serial_from_device(name)
    if serial:
        return f'{{serial}}{serial}'
    klass = geom.class_by_name('PART')
    if klass:
        for g in klass.geoms:
            for p in g.providers:
                if p.name == name:
                    # Fix: a provider may expose no config, in which case
                    # p.config is None and subscripting raises TypeError;
                    # the sibling implementation carries this guard.
                    if p.config is None:
                        continue
                    # freebsd-zfs partition
                    if p.config[
                        'rawtype'] == '516e7cba-6ecf-11d6-8ff8-00022d09712b':
                        return f'{{uuid}}{p.config["rawuuid"]}'
    g = geom.geom_by_name('LABEL', name)
    if g:
        return f'{{label}}{g.provider.name}'
    g = geom.geom_by_name('DEV', name)
    if g:
        return f'{{devicename}}{name}'
    return ''
def _get_class_multipath(self):
    """Return every MULTIPATH geom as a dict with path, name, mediasize
    and the list of member provider names."""
    geom.scan()
    cls = geom.class_by_name('MULTIPATH')
    if not cls:
        return []
    return [
        {
            "path": os.path.join("/dev", g.name),
            "name": g.name,
            "mediasize": g.provider.mediasize,
            "members": [c.provider.name for c in g.consumers],
        }
        for g in cls.geoms
    ]
async def device_to_identifier(self, name):
    """
    Given a device `name` (e.g. da0) returns an unique identifier string
    for this device.
    This identifier is in the form of {type}string, "type" can be one of
    the following:
      - serial_lunid - for disk serial concatenated with the lunid
      - serial - disk serial
      - uuid - uuid of a ZFS GPT partition
      - label - label name from geom label
      - devicename - name of the device if any other could not be used/found

    Returns:
        str - identifier
    """
    await self.middleware.threaded(geom.scan)
    g = geom.geom_by_name('DISK', name)
    if g and g.provider.config.get('ident'):
        serial = g.provider.config['ident']
        lunid = g.provider.config.get('lunid')
        if lunid:
            return f'{{serial_lunid}}{serial}_{lunid}'
        return f'{{serial}}{serial}'
    serial = await self.serial_from_device(name)
    if serial:
        return f'{{serial}}{serial}'
    klass = geom.class_by_name('PART')
    if klass:
        for g in klass.geoms:
            for p in g.providers:
                if p.name == name:
                    # Fix: a provider may expose no config, in which case
                    # p.config is None and subscripting raises TypeError;
                    # the sibling implementation carries this guard.
                    if p.config is None:
                        continue
                    # freebsd-zfs partition
                    if p.config['rawtype'] == '516e7cba-6ecf-11d6-8ff8-00022d09712b':
                        return f'{{uuid}}{p.config["rawuuid"]}'
    g = geom.geom_by_name('LABEL', name)
    if g:
        return f'{{label}}{g.provider.name}'
    g = geom.geom_by_name('DEV', name)
    if g:
        return f'{{devicename}}{name}'
    return ''
def create_upload_location(self):
    """Create (if needed) and prepare the md(4)-backed upload filesystem.

    When no labelled device exists yet: allocates a 2800m swap-backed md
    device, glabels it UPLOAD_LABEL, creates a UFS filesystem and mounts
    it at UPLOAD_LOCATION.  Ownership/permissions are (re)applied either
    way.

    Returns:
        str: UPLOAD_LOCATION.

    Raises:
        CallError: when mdconfig, newfs or mount fail.
    """
    geom.scan()
    klass_label = geom.class_by_name('LABEL')
    prov = klass_label.xml.find(
        f'.//provider[name = "label/{UPLOAD_LABEL}"]/../consumer/provider')
    if prov is None:
        # No labelled device yet: create the memory disk and label it.
        cp = subprocess.run(
            ['mdconfig', '-a', '-t', 'swap', '-s', '2800m'],
            text=True,
            capture_output=True,
            check=False,
        )
        if cp.returncode != 0:
            raise CallError(f'Could not create memory device: {cp.stderr}')
        # mdconfig prints the new device name (e.g. "md0") on stdout.
        mddev = cp.stdout.strip()
        subprocess.run(['glabel', 'create', UPLOAD_LABEL, mddev],
                       capture_output=True, check=False)
        cp = subprocess.run(
            ['newfs', f'/dev/label/{UPLOAD_LABEL}'],
            text=True,
            capture_output=True,
            check=False,
        )
        if cp.returncode != 0:
            raise CallError(
                f'Could not create temporary filesystem: {cp.stderr}')
        # Start from an empty mountpoint.
        shutil.rmtree(UPLOAD_LOCATION, ignore_errors=True)
        os.makedirs(UPLOAD_LOCATION)
        cp = subprocess.run(
            ['mount', f'/dev/label/{UPLOAD_LABEL}', UPLOAD_LOCATION],
            text=True,
            capture_output=True,
            check=False,
        )
        if cp.returncode != 0:
            raise CallError(
                f'Could not mount temporary filesystem: {cp.stderr}')
    shutil.chown(UPLOAD_LOCATION, 'www', 'www')
    os.chmod(UPLOAD_LOCATION, 0o755)
    return UPLOAD_LOCATION
async def _get_disk(self):
    """Return a mapping of disk name -> attribute dict for all DISK
    geoms, skipping cd* (optical) devices."""
    await self.middleware.threaded(geom.scan)
    disks = {}
    klass = geom.class_by_name('DISK')
    if not klass:
        return disks
    for g in klass.geoms:
        # Skip cd*
        if g.name.startswith('cd'):
            continue
        disk = {
            'name': g.name,
            'mediasize': g.provider.mediasize,
            'sectorsize': g.provider.sectorsize,
            'stripesize': g.provider.stripesize,
        }
        # Fix: provider.config can be None, which made the unconditional
        # update() raise TypeError; guard like the sibling implementation.
        if g.provider.config:
            disk.update(g.provider.config)
        disks[g.name] = disk
    return disks
def get_disks(self, name):
    """Yield names of disks (or multipath devices) backing pool `name`."""
    disks = self.get_devices(name)
    geom.scan()
    labelclass = geom.class_by_name('LABEL')
    for dev in disks:
        # Normalize geli ".eli" suffixes to the underlying device name.
        dev = dev.replace('.eli', '')
        find = labelclass.xml.findall(f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            # Device is a label: resolve the consumed provider to its geom.
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        # Multipath members are accepted as-is; others must be DISK geoms.
        if name and (name.startswith('multipath/') or geom.geom_by_name('DISK', name)):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
async def _get_disk(self):
    """Return a mapping of disk name -> attribute dict for all DISK
    geoms, skipping cd* (optical) devices."""
    await self.middleware.run_in_thread(geom.scan)
    disks = {}
    klass = geom.class_by_name('DISK')
    if not klass:
        return disks
    for g in klass.geoms:
        # Skip cd*
        if g.name.startswith('cd'):
            continue
        disk = {
            'name': g.name,
            'mediasize': g.provider.mediasize,
            'sectorsize': g.provider.sectorsize,
            'stripesize': g.provider.stripesize,
        }
        # Merge provider config (ident, lunid, descr, ...) when present;
        # config may be None for some providers.
        if g.provider.config:
            disk.update(g.provider.config)
        disks[g.name] = disk
    return disks
async def __multipath_next(self):
    """
    Find out the next available name for a multipath named diskX
    where X is a crescenting value starting from 1

    Returns:
        The string of the multipath name to be created
    """
    await self.middleware.threaded(geom.scan)
    # Numeric suffixes of the existing multipath "diskN" geoms, sorted.
    numbers = sorted([
        int(RE_MPATH_NAME.search(g.name).group(1))
        for g in geom.class_by_name('MULTIPATH').geoms
        if RE_MPATH_NAME.match(g.name)
    ])
    if not numbers:
        numbers = [0]
    # First gap in the sequence wins; max+1 when the sequence is dense.
    for number in range(1, numbers[-1] + 2):
        if number not in numbers:
            break
    else:
        raise ValueError('Could not find multipaths')
    return f'disk{number}'
async def get_boot_type(self):
    """
    Get the boot type of the boot pool.

    Returns:
        "BIOS", "EFI", None
    """
    await self.middleware.run_in_thread(geom.scan)
    labelclass = geom.class_by_name('PART')
    efi = bios = 0
    async for disk in await self.get_disks():
        for e in labelclass.xml.findall(f".//geom[name='{disk}']/provider/config/type"):
            if e.text == 'efi':
                efi += 1
            elif e.text == 'bios-boot':
                bios += 1
    if efi == 0 and bios == 0:
        return None
    # Any BIOS-partitioned disk forces BIOS boot for the whole set.
    if bios > 0:
        return 'BIOS'
    return 'EFI'
async def __multipath_next(self):
    """
    Find out the next available name for a multipath named diskX
    where X is a crescenting value starting from 1

    Returns:
        The string of the multipath name to be created
    """
    await self.middleware.run_in_thread(geom.scan)
    # Numeric suffixes of the existing multipath "diskN" geoms, sorted.
    numbers = sorted([
        int(RE_MPATH_NAME.search(g.name).group(1))
        for g in geom.class_by_name('MULTIPATH').geoms
        if RE_MPATH_NAME.match(g.name)
    ])
    if not numbers:
        numbers = [0]
    # First gap in the sequence wins; max+1 when the sequence is dense.
    for number in range(1, numbers[-1] + 2):
        if number not in numbers:
            break
    else:
        raise ValueError('Could not find multipaths')
    return f'disk{number}'
async def attach(self, dev, options=None):
    """
    Attach a disk to the boot pool, turning a stripe into a mirror.

    `expand` option will determine whether the new disk partition will be
    the maximum available or the same size as the current disk.
    """
    disks = [d async for d in await self.get_disks()]
    if len(disks) > 1:
        raise CallError('3-way mirror not supported yet')
    format_opts = {}
    if not options['expand']:
        # Lets try to find out the size of the current freebsd-zfs partition so
        # the new partition is not bigger, preventing size mismatch if one of
        # them fail later on. See #21336
        await self.middleware.run_in_thread(geom.scan)
        labelclass = geom.class_by_name('PART')
        for e in labelclass.xml.findall(
            f"./geom[name='{disks[0]}']/provider/config[type='freebsd-zfs']"
        ):
            format_opts['size'] = int(e.find('./length').text)
            break
    boottype = await self.format(dev, format_opts)
    # Attach the new p2 partition to the existing disk, forming a mirror.
    await self.middleware.call('zfs.pool.extend', 'freenas-boot', None, [{
        'target': f'{disks[0]}p2',
        'type': 'DISK',
        'path': f'/dev/{dev}p2'
    }])
    # We need to wait a little bit to install grub onto the new disk
    # FIXME: use event for when its ready instead of sleep
    await asyncio.sleep(10)
    await self.install_grub(boottype, dev)
def get_swap_mirrors(self, filters, options):
    """Query gmirror swap mirrors, returning mirror_base-shaped dicts
    filtered/paginated via filter_list."""
    mirrors = []
    geom.scan()
    klass = geom.class_by_name('MIRROR')
    if not klass:
        return mirrors
    # Skip transient "<name>.sync" geoms created while a mirror rebuilds.
    for g in filter(lambda g: not g.name.endswith('.sync'), klass.geoms):
        mirror_data = {
            **deepcopy(self.mirror_base),
            'name': g.name,
            'config_type': g.config.get('Type') if g.config else None,
            'path': os.path.join('/dev/mirror', g.name),
            'real_path': os.path.join('/dev/mirror', g.name),
        }
        # A sibling ".eli" device marks the mirror as geli-encrypted.
        if os.path.exists(f'{mirror_data["path"]}.eli'):
            mirror_data['encrypted_provider'] = f'{mirror_data["path"]}.eli'
        for c in g.consumers:
            mirror_data['providers'].append({
                'name': c.provider.name,
                'id': c.provider.id,
                'disk': c.provider.geom.name
            })
        mirrors.append(mirror_data)
    return filter_list(mirrors, filters, options)
async def get_disks(self, name):
    """Yield the names of physical disks backing the zpool `name`.

    Raises:
        CallError: with errno.ENOENT when the pool does not exist.
    """
    # Fix: use the ZFS handle as a context manager so it is closed
    # promptly instead of leaking until garbage collection; matches the
    # sibling implementation of this method.
    try:
        with libzfs.ZFS() as zfs:
            disks = list(zfs.get(name).disks)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)
    await self.middleware.threaded(geom.scan)
    labelclass = geom.class_by_name('LABEL')
    for absdev in disks:
        # Normalize "/dev/..." paths and geli ".eli" suffixes.
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        find = labelclass.xml.findall(f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            # Device is a label: resolve the consumed provider to its geom.
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        if name and geom.geom_by_name('DISK', name):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
async def attach(self, job, dev, options=None):
    """
    Attach a disk to the boot pool, turning a stripe into a mirror.

    `expand` option will determine whether the new disk partition will be
    the maximum available or the same size as the current disk.
    """
    disks = [d async for d in await self.get_disks()]
    if len(disks) > 1:
        raise CallError('3-way mirror not supported yet')
    format_opts = {}
    if not options['expand']:
        # Lets try to find out the size of the current freebsd-zfs partition so
        # the new partition is not bigger, preventing size mismatch if one of
        # them fail later on. See #21336
        await self.middleware.run_in_thread(geom.scan)
        labelclass = geom.class_by_name('PART')
        for e in labelclass.xml.findall(
            f"./geom[name='{disks[0]}']/provider/config[type='freebsd-zfs']"
        ):
            format_opts['size'] = int(e.find('./length').text)
            break
    try:
        boottype = await self.format(dev, format_opts)
    except CallError as e:
        if "gpart: autofill: No space left on device" in e.errmsg:
            # Translate gpart's terse error into a size comparison of the
            # old and new devices so the user knows what to buy.
            async def get_diskinfo(dev):
                # `diskinfo -v` prints "value  # description" lines;
                # build a description -> value mapping from them.
                diskinfo = {
                    s.split("#")[1].strip(): s.split("#")[0].strip()
                    for s in (await run("/usr/sbin/diskinfo", "-v", dev)).stdout.decode("utf-8").split("\n")
                    if "#" in s
                }
                return {
                    "name": diskinfo.get("Disk descr.", dev),
                    "size_gb": "%.2f" % ((int(diskinfo["mediasize in sectors"]) * int(diskinfo["sectorsize"]) /
                                          float(1024 ** 3))),
                    "size_sectors": int(diskinfo["mediasize in sectors"]),
                }
            src_info = await get_diskinfo(disks[0])
            dst_info = await get_diskinfo(dev)
            raise CallError((
                f"The device called {dst_info['name']} ({dst_info['size_gb']} GB, {dst_info['size_sectors']} "
                f"sectors does not have enough space to mirror the old device {src_info['name']} "
                f"({src_info['size_gb']} GB, {src_info['size_sectors']} sectors). Please use a larger device."
            ))
        raise
    # Attach the new p2 partition to the existing disk, forming a mirror.
    await self.middleware.call('zfs.pool.extend', 'freenas-boot', None, [{
        'target': f'{disks[0]}p2',
        'type': 'DISK',
        'path': f'/dev/{dev}p2'
    }])
    await self.install_loader(boottype, dev)
async def multipath_sync(self):
    """
    Synchronize multipath disks

    Every distinct GEOM_DISK that shares an ident (aka disk serial)
    with conjunction of the lunid is considered a multipath and will be
    handled by GEOM_MULTIPATH.

    If the disk is not currently in use by some Volume or iSCSI Disk
    Extent then a gmultipath is automatically created and will be
    available for use.
    """
    await self.middleware.threaded(geom.scan)
    # Disks already consumed by an existing MULTIPATH geom.
    mp_disks = []
    for g in geom.class_by_name('MULTIPATH').geoms:
        for c in g.consumers:
            p_geom = c.provider.geom
            # For now just DISK is allowed
            if p_geom.clazz.name != 'DISK':
                self.logger.warn(
                    "A consumer that is not a disk (%s) is part of a "
                    "MULTIPATH, currently unsupported by middleware",
                    p_geom.clazz.name
                )
                continue
            mp_disks.append(p_geom.name)
    reserved = []
    async for i in await self.middleware.call('boot.get_disks'):
        reserved.append(i)
    # disks already in use count as reserved as well
    for pool in await self.middleware.call('pool.query'):
        try:
            if pool['is_decrypted']:
                async for i in await self.middleware.call('pool.get_disks', pool['id']):
                    reserved.append(i)
        except CallError as e:
            # pool could not be available for some reason
            if e.errno != errno.ENOENT:
                raise
    is_freenas = await self.middleware.call('system.is_freenas')
    serials = defaultdict(list)
    active_active = []
    for g in geom.class_by_name('DISK').geoms:
        # Only candidate da* disks that are neither reserved nor already
        # part of a multipath are considered.
        if not RE_DA.match(g.name) or g.name in reserved or g.name in mp_disks:
            continue
        if not is_freenas:
            # Certain hardware is configured Active/Active on TrueNAS.
            descr = g.provider.config.get('descr') or ''
            if (
                descr == 'STEC ZeusRAM' or
                descr.startswith('VIOLIN') or
                descr.startswith('3PAR')
            ):
                active_active.append(g.name)
        serial = ''
        v = g.provider.config.get('ident')
        if v:
            serial = v
        v = g.provider.config.get('lunid')
        if v:
            serial += v
        if not serial:
            continue
        size = g.provider.mediasize
        # Disks sharing (serial+lunid, size) are paths to the same LUN.
        serials[(serial, size)].append(g.name)
        serials[(serial, size)].sort(key=lambda x: int(x[2:]))
    disks_pairs = [disks for disks in list(serials.values())]
    disks_pairs.sort(key=lambda x: int(x[0][2:]))
    # Mode is Active/Passive for FreeNAS
    mode = None if is_freenas else 'R'
    for disks in disks_pairs:
        if not len(disks) > 1:
            continue
        name = await self.__multipath_next()
        await self.__multipath_create(name, disks, 'A' if disks[0] in active_active else mode)
    # Scan again to take new multipaths into account
    await self.middleware.threaded(geom.scan)
    mp_ids = []
    for g in geom.class_by_name('MULTIPATH').geoms:
        _disks = []
        for c in g.consumers:
            p_geom = c.provider.geom
            # For now just DISK is allowed
            if p_geom.clazz.name != 'DISK':
                continue
            _disks.append(p_geom.name)
        qs = await self.middleware.call('datastore.query', 'storage.disk', [
            ['OR', [
                ['disk_name', 'in', _disks],
                ['disk_multipath_member', 'in', _disks],
            ]],
        ])
        if qs:
            diskobj = qs[0]
            mp_ids.append(diskobj['disk_identifier'])
            update = False
            # Make sure to not update if nothing changed
            if diskobj['disk_multipath_name'] != g.name:
                update = True
                diskobj['disk_multipath_name'] = g.name
            if diskobj['disk_name'] in _disks:
                _disks.remove(diskobj['disk_name'])
            if _disks and diskobj['disk_multipath_member'] != _disks[-1]:
                update = True
                diskobj['disk_multipath_member'] = _disks.pop()
            if update:
                await self.middleware.call('datastore.update', 'storage.disk', diskobj['disk_identifier'], diskobj)
    # Update all disks which were not identified as MULTIPATH, resetting attributes
    for disk in (await self.middleware.call('datastore.query', 'storage.disk', [('disk_identifier', 'nin', mp_ids)])):
        if disk['disk_multipath_name'] or disk['disk_multipath_member']:
            disk['disk_multipath_name'] = ''
            disk['disk_multipath_member'] = ''
            await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)
async def swaps_configure(self):
    """
    Configures swap partitions in the system.
    We try to mirror all available swap partitions to avoid a system
    crash in case one of them dies.

    Returns:
        list: device names (relative to /dev) now used for swap.
    """
    await self.middleware.threaded(geom.scan)
    used_partitions = set()
    swap_devices = []
    klass = geom.class_by_name('MIRROR')
    if klass:
        for g in klass.geoms:
            # Skip gmirror that is not swap*
            if not g.name.startswith('swap') or g.name.endswith('.sync'):
                continue
            consumers = list(g.consumers)
            # If the mirror is degraded lets remove it and make a new pair
            if len(consumers) == 1:
                c = consumers[0]
                await self.swaps_remove_disks([c.provider.geom.name])
            else:
                swap_devices.append(f'mirror/{g.name}')
                for c in consumers:
                    # Add all partitions used in swap, removing the geli
                    # suffix.  Fix: the previous .strip('.eli') removed any
                    # of the characters '.', 'e', 'l', 'i' from both ends
                    # of the name (str.strip takes a character set, not a
                    # suffix), which could mangle partition names.
                    pname = c.provider.name
                    if pname.endswith('.eli'):
                        pname = pname[:-len('.eli')]
                    used_partitions.add(pname)
    klass = geom.class_by_name('PART')
    if not klass:
        return
    # Get all partitions of swap type, indexed by size
    swap_partitions_by_size = defaultdict(list)
    for g in klass.geoms:
        for p in g.providers:
            # if swap partition
            if p.config['rawtype'] == '516e7cb5-6ecf-11d6-8ff8-00022d09712b':
                # Try to save a core dump from that
                await run('savecore', '-z', '-m', '5', '/data/crash/', f'/dev/{p.name}', check=False)
                if p.name not in used_partitions:
                    swap_partitions_by_size[p.mediasize].append(p.name)
    dumpdev = False
    unused_partitions = []
    for size, partitions in swap_partitions_by_size.items():
        # Pair equally-sized partitions two at a time into gmirrors.
        for i in range(int(len(partitions) / 2)):
            if len(swap_devices) > MIRROR_MAX:
                break
            part_a, part_b = partitions[0:2]
            partitions = partitions[2:]
            if not dumpdev:
                dumpdev = await dempdev_configure(part_a)
            try:
                name = new_swap_name()
                await run('gmirror', 'create', '-b', 'prefer', name, part_a, part_b)
            except Exception:
                self.logger.warn(f'Failed to create gmirror {name}', exc_info=True)
                continue
            swap_devices.append(f'mirror/{name}')
        # Add remaining partitions to unused list
        unused_partitions += partitions
    # If we could not make even a single swap mirror, add the first unused
    # partition as a swap device
    if not swap_devices and unused_partitions:
        if not dumpdev:
            dumpdev = await dempdev_configure(unused_partitions[0])
        swap_devices.append(unused_partitions[0])
    for name in swap_devices:
        # Encrypt each swap device with a one-time geli key before use.
        if not os.path.exists(f'/dev/{name}.eli'):
            await run('geli', 'onetime', name)
        await run('swapon', f'/dev/{name}.eli', check=False)
    return swap_devices
async def multipath_sync(self):
    """
    Synchronize multipath disks

    Every distinct GEOM_DISK that shares an ident (aka disk serial)
    with conjunction of the lunid is considered a multipath and will be
    handled by GEOM_MULTIPATH.

    If the disk is not currently in use by some Volume or iSCSI Disk
    Extent then a gmultipath is automatically created and will be
    available for use.
    """
    await self.middleware.run_in_thread(geom.scan)

    # Collect disk names already claimed by an existing MULTIPATH geom so
    # they are excluded from pairing below.
    # NOTE(review): geom.class_by_name('MULTIPATH') is assumed non-None
    # here (no guard like the PART checks elsewhere) — confirm the class
    # always exists after a scan.
    mp_disks = []
    for g in geom.class_by_name('MULTIPATH').geoms:
        for c in g.consumers:
            p_geom = c.provider.geom
            # For now just DISK is allowed
            if p_geom.clazz.name != 'DISK':
                self.logger.warn(
                    "A consumer that is not a disk (%s) is part of a "
                    "MULTIPATH, currently unsupported by middleware",
                    p_geom.clazz.name
                )
                continue
            mp_disks.append(p_geom.name)

    reserved = await self.get_reserved()

    is_freenas = await self.middleware.call('system.is_freenas')

    # Group candidate disks by (serial+lunid, mediasize); each group of
    # more than one disk is a multipath candidate.
    serials = defaultdict(list)
    active_active = []
    for g in geom.class_by_name('DISK').geoms:
        # Only da* devices that are neither reserved nor already multipathed
        if not RE_DA.match(g.name) or g.name in reserved or g.name in mp_disks:
            continue
        if not is_freenas:
            # On TrueNAS, certain hardware models are known to support
            # Active/Active multipath
            descr = g.provider.config.get('descr') or ''
            if (
                descr == 'STEC ZeusRAM' or
                descr.startswith('VIOLIN') or
                descr.startswith('3PAR')
            ):
                active_active.append(g.name)
        serial = ''
        v = g.provider.config.get('ident')
        if v:
            serial = v
        v = g.provider.config.get('lunid')
        if v:
            serial += v
        if not serial:
            continue
        size = g.provider.mediasize
        serials[(serial, size)].append(g.name)
        # Keep group members ordered by device number (e.g. 'da12' -> 12)
        serials[(serial, size)].sort(key=lambda x: int(x[2:]))

    disks_pairs = [disks for disks in list(serials.values())]
    # Order groups by the number of their first member for stable naming
    disks_pairs.sort(key=lambda x: int(x[0][2:]))

    # Mode is Active/Passive for FreeNAS
    mode = None if is_freenas else 'R'
    for disks in disks_pairs:
        if not len(disks) > 1:
            continue
        name = await self.__multipath_next()
        try:
            await self.multipath_create(name, disks, 'A' if disks[0] in active_active else mode)
        except CallError as e:
            self.logger.error("Error creating multipath: %s", e.errmsg)

    # Scan again to take new multipaths into account
    await self.middleware.run_in_thread(geom.scan)
    mp_ids = []
    for g in geom.class_by_name('MULTIPATH').geoms:
        _disks = []
        for c in g.consumers:
            p_geom = c.provider.geom
            # For now just DISK is allowed
            if p_geom.clazz.name != 'DISK':
                continue
            _disks.append(p_geom.name)

        # Find the (non-expired) datastore row that refers to any member
        # of this multipath, either by its own name or as a recorded member
        qs = await self.middleware.call('datastore.query', 'storage.disk', [
            ['OR', [
                ['disk_name', 'in', _disks],
                ['disk_multipath_member', 'in', _disks],
            ]],
            ['disk_expiretime', '=', None],
        ])
        if qs:
            diskobj = qs[0]
            mp_ids.append(diskobj['disk_identifier'])
            update = False  # Make sure to not update if nothing changed
            if diskobj['disk_multipath_name'] != g.name:
                update = True
                diskobj['disk_multipath_name'] = g.name
            if diskobj['disk_name'] in _disks:
                _disks.remove(diskobj['disk_name'])
            # Record the remaining member (the "other" path) on the row
            if _disks and diskobj['disk_multipath_member'] != _disks[-1]:
                update = True
                diskobj['disk_multipath_member'] = _disks.pop()
            if update:
                await self.middleware.call('datastore.update', 'storage.disk', diskobj['disk_identifier'], diskobj)

    # Update all disks which were not identified as MULTIPATH, resetting attributes
    for disk in (await self.middleware.call('datastore.query', 'storage.disk', [('disk_identifier', 'nin', mp_ids)])):
        if disk['disk_multipath_name'] or disk['disk_multipath_member']:
            disk['disk_multipath_name'] = ''
            disk['disk_multipath_member'] = ''
            await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)
async def wipe(self, job, dev, mode):
    """
    Performs a wipe of a disk `dev`.

    It can be of the following modes:
      - QUICK: clean the first few and last megabytes of every partition and disk
      - FULL: write whole disk with zero's
      - FULL_RANDOM: write whole disk with random bytes
    """
    await self.swaps_remove_disks([dev])

    # First do a quick wipe of every partition to clean things like zfs labels
    if mode == 'QUICK':
        await self.middleware.threaded(geom.scan)
        klass = geom.class_by_name('PART')
        for g in klass.xml.findall(f'./geom[name=\'{dev}\']'):
            for p in g.findall('./provider'):
                size = p.find('./mediasize')
                if size is not None:
                    try:
                        size = int(size.text)
                    except ValueError:
                        size = None
                name = p.find('./name')
                await self.wipe_quick(name.text, size=size)

    await run('gpart', 'destroy', '-F', f'/dev/{dev}', check=False)

    # Wipe out the partition table by doing an additional iterate of create/destroy
    await run('gpart', 'create', '-s', 'gpt', f'/dev/{dev}')
    await run('gpart', 'destroy', '-F', f'/dev/{dev}')

    if mode == 'QUICK':
        await self.wipe_quick(dev)
    else:
        # Third field of diskinfo output is the media size in bytes
        cp = await run('diskinfo', dev)
        size = int(re.sub(r'\s+', ' ', cp.stdout.decode()).split()[2])

        proc = await Popen([
            'dd',
            'if=/dev/{}'.format('zero' if mode == 'FULL' else 'random'),
            f'of=/dev/{dev}',
            'bs=1m',
        ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        async def dd_wait():
            # Poke dd with SIGINFO once a second so it reports progress
            # on stderr (FreeBSD behavior).
            while True:
                if proc.returncode is not None:
                    break
                try:
                    os.kill(proc.pid, signal.SIGINFO)
                except ProcessLookupError:
                    # dd already exited but its returncode has not been
                    # reaped yet; stop signaling instead of crashing the
                    # background task.
                    break
                await asyncio.sleep(1)

        asyncio.ensure_future(dd_wait())

        while True:
            line = await proc.stderr.readline()
            if line == b'':
                break
            line = line.decode()
            reg = RE_DD.search(line)
            if reg:
                # NOTE(review): this passes a 0..1 fraction of bytes
                # written to set_progress — confirm callers expect a
                # fraction rather than a percentage.
                job.set_progress(int(reg.group(1)) / size, extra={'speed': int(reg.group(2))})

        # Reap dd so proc.returncode gets set (which also lets dd_wait
        # terminate promptly).
        await proc.wait()
    await self.sync(dev)