def __init__(self, iname):
    """
    Initialize a Disk wrapper from a device name (e.g. "da0" or "/dev/da0").

    Resolves the real DISK geom name via the DEV geom, caches size and
    description from the provider, and builds the partition list from the
    PART geom (if any).

    Raises:
        RuntimeError: if the real name cannot be resolved or no DISK geom
            exists for the resolved name.
    """
    # Accept either "da0" or "/dev/da0" — strip the /dev/ prefix.
    if iname.startswith("/dev/"):
        iname = iname[5:]
    name = DiskRealName(geom.geom_by_name("DEV", iname))
    if name is None:
        raise RuntimeError(
            "Unable to find real name for disk {}".format(iname))
    disk = geom.geom_by_name("DISK", name)
    if disk:
        self._geom = disk
        self._name = name
        self._size = disk.provider.mediasize
        self._description = disk.provider.description
        # Partition table (if the disk has one) lives in the PART class.
        part_geom = geom.geom_by_name("PART", disk.name)
        self._parts = []
        if part_geom and part_geom.providers:
            for part in part_geom.providers:
                part_obj = Partition(type=part.config["type"],
                                     index=int(part.config["index"]),
                                     size=int(part.config["length"]),
                                     label=part.config["label"],
                                     disk=self)
                self._parts.append(part_obj)
    else:
        raise RuntimeError("Unable to find disk {}".format(name))
async def device_to_identifier(self, name, disks):
    """
    Return a unique identifier string for device `name` (e.g. da0).

    `disks` is a mapping of device names to disk data (the shape returned
    by 'device.get_disk'); it acts as a cache to avoid a middleware call.

    Identifier preference order:
      {serial_lunid} -> {serial} -> {uuid} (ZFS GPT partition) ->
      {label} (geom label) -> {devicename} -> '' when nothing matched.
    """
    # BUG FIX: the original called disks.get('name') — looking up the
    # literal key 'name' instead of the device name — so the cache was
    # never hit and every call fell through to device.get_disk.
    disk_data = disks.get(name) or await self.middleware.call(
        'device.get_disk', name)
    if disk_data and disk_data['serial_lunid']:
        return f'{{serial_lunid}}{disk_data["serial_lunid"]}'
    elif disk_data and disk_data['serial']:
        return f'{{serial}}{disk_data["serial"]}'

    # No serial information: interrogate GEOM. geom.scan() blocks, so run
    # it in a thread to keep the event loop responsive.
    await self.middleware.run_in_thread(geom.scan)
    klass = geom.class_by_name('PART')
    if klass:
        for g in filter(lambda v: v.name == name, klass.geoms):
            for p in g.providers:
                if p.config is None:
                    continue
                if p.config['rawtype'] in await self.middleware.call(
                    'disk.get_valid_zfs_partition_type_uuids'
                ):
                    return f'{{uuid}}{p.config["rawuuid"]}'
    g = geom.geom_by_name('LABEL', name)
    if g:
        return f'{{label}}{g.provider.name}'
    g = geom.geom_by_name('DEV', name)
    if g:
        return f'{{devicename}}{name}'
    return ''
async def get_disks(self, name):
    """
    Yield the DISK geom names of the disks backing ZFS pool `name`.

    Raises:
        CallError: (errno.ENOENT) when libzfs cannot open the pool.
    """
    zfs = libzfs.ZFS()
    try:
        zpool = zfs.get(name)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)

    # geom.scan() blocks; run it off the event loop.
    await self.middleware.run_in_thread(geom.scan)
    labelclass = geom.class_by_name('LABEL')
    for absdev in zpool.disks:
        # Strip the /dev/ prefix and any geli (.eli) suffix to get the
        # plain provider name.
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        # If the pool member is a label, follow it down to the underlying
        # provider via the LABEL class XML.
        find = labelclass.xml.findall(
            f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            # Not a label: resolve through the DEV geom instead.
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        if name and geom.geom_by_name('DISK', name):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
def get_disks(self, name):
    """
    Yield the DISK geom names of the disks backing ZFS pool `name`.

    Raises:
        CallError: (errno.ENOENT) when libzfs cannot open the pool.
    """
    try:
        with libzfs.ZFS() as zfs:
            disks = list(zfs.get(name).disks)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)

    geom.scan()
    labelclass = geom.class_by_name('LABEL')
    for absdev in disks:
        # Plain provider name: no /dev/ prefix, no geli suffix.
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        # A pool member may be a label; chase it to the real provider.
        matches = labelclass.xml.findall(
            f".//provider[name='{dev}']/../consumer/provider")
        if matches:
            disk_name = geom.provider_by_id(matches[0].get('ref')).geom.name
        else:
            disk_name = None
            dev_geom = geom.geom_by_name('DEV', dev)
            if dev_geom:
                disk_name = dev_geom.consumer.provider.geom.name
        if not disk_name or not geom.geom_by_name('DISK', disk_name):
            self.logger.debug(f'Could not find disk for {dev}')
            continue
        yield disk_name
def validate_disk(name):
    """
    Given the geom of a disk, let's see if it's appropriate.

    Innappropriate disks are too small (4gbytes and less), or are already
    mounted / in use by a pool.  (That latter being somewhat harder to
    tell...)

    Raises:
        ValidationError: DiskInUse, DiskNoInfo, or DiskTooSmall.
    """
    min_disk_size = 4 * 1024 * 1024 * 1024
    used_disks = []
    # Start with zfs disks.  Materialize the pool collection once and
    # iterate that (the original bound list(zfs.pools) to an unused
    # variable and then walked zfs.pools a second time).
    for pool in list(zfs.pools):
        for disk in pool.disks:
            # Remove the beginning "/dev/"
            disk_name = disk[5:]
            x = geom.geom_by_name("DEV", disk_name)
            used_disks.append(DiskRealName(x))
    # Now let's go for the mounted disks
    mounts = bsd.getmntinfo()
    for mount in mounts:
        if mount.fstype in ("tmpfs", "devfs"):
            # It's not a real disk!
            continue
        if mount.source.startswith("/dev/"):
            disk_name = mount.source[5:]
            x = geom.geom_by_name("DEV", disk_name)
            used_disks.append(DiskRealName(x))
    if name in used_disks:
        raise ValidationError(code=ValidationCode.DiskInUse,
                              message="Disk {} is in use".format(name))
    try:
        disk = Utils.Disk(name)
    except RuntimeError:
        LogIt(
            "Could not find information about disk {} in validate_disk".format(
                name))
        raise ValidationError(
            code=ValidationCode.DiskNoInfo,
            message="No information available for disk {}".format(name))
    if disk.size < min_disk_size:
        LogIt("Disk {} is too small ({}, need 4G at least)".format(
            name, disk.smart_size))
        raise ValidationError(
            code=ValidationCode.DiskTooSmall,
            message="Disk {} is too small ({}, need 4G at least)".format(
                name, disk.smart_size))
    return
async def device_to_identifier(self, name):
    """
    Given a device `name` (e.g. da0) returns an unique identifier string
    for this device.
    This identifier is in the form of {type}string, "type" can be one of
    the following:
      - serial_lunid - for disk serial concatenated with the lunid
      - serial - disk serial
      - uuid - uuid of a ZFS GPT partition
      - label - label name from geom label
      - devicename - name of the device if any other could not be used/found

    Returns:
        str - identifier
    """
    # geom.scan() blocks; run it off the event loop.
    await self.middleware.threaded(geom.scan)

    # Best option: serial (+lunid) straight from the DISK geom config.
    g = geom.geom_by_name('DISK', name)
    if g and g.provider.config.get('ident'):
        serial = g.provider.config['ident']
        lunid = g.provider.config.get('lunid')
        if lunid:
            return f'{{serial_lunid}}{serial}_{lunid}'
        return f'{{serial}}{serial}'

    # Fall back to asking the device itself (smartctl).
    serial = await self.serial_from_device(name)
    if serial:
        return f'{{serial}}{serial}'

    klass = geom.class_by_name('PART')
    if klass:
        for g in klass.geoms:
            for p in g.providers:
                if p.name == name:
                    # freebsd-zfs partition
                    if p.config[
                        'rawtype'
                    ] == '516e7cba-6ecf-11d6-8ff8-00022d09712b':
                        return f'{{uuid}}{p.config["rawuuid"]}'

    g = geom.geom_by_name('LABEL', name)
    if g:
        return f'{{label}}{g.provider.name}'

    # Last resort: the bare device name.
    g = geom.geom_by_name('DEV', name)
    if g:
        return f'{{devicename}}{name}'

    return ''
async def device_to_identifier(self, name):
    """
    Given a device `name` (e.g. da0) returns an unique identifier string
    for this device.
    This identifier is in the form of {type}string, "type" can be one of
    the following:
      - serial_lunid - for disk serial concatenated with the lunid
      - serial - disk serial
      - uuid - uuid of a ZFS GPT partition
      - label - label name from geom label
      - devicename - name of the device if any other could not be used/found

    Returns:
        str - identifier
    """
    # geom.scan() blocks; run it off the event loop.
    await self.middleware.threaded(geom.scan)

    # Best option: serial (+lunid) straight from the DISK geom config.
    g = geom.geom_by_name('DISK', name)
    if g and g.provider.config.get('ident'):
        serial = g.provider.config['ident']
        lunid = g.provider.config.get('lunid')
        if lunid:
            return f'{{serial_lunid}}{serial}_{lunid}'
        return f'{{serial}}{serial}'

    # Fall back to asking the device itself (smartctl).
    serial = await self.serial_from_device(name)
    if serial:
        return f'{{serial}}{serial}'

    klass = geom.class_by_name('PART')
    if klass:
        for g in klass.geoms:
            for p in g.providers:
                if p.name == name:
                    # freebsd-zfs partition
                    if p.config['rawtype'] == '516e7cba-6ecf-11d6-8ff8-00022d09712b':
                        return f'{{uuid}}{p.config["rawuuid"]}'

    g = geom.geom_by_name('LABEL', name)
    if g:
        return f'{{label}}{g.provider.name}'

    # Last resort: the bare device name.
    g = geom.geom_by_name('DEV', name)
    if g:
        return f'{{devicename}}{name}'

    return ''
def DiskInfo(name):
    """
    Return a dictionary with name, size, and description values
    (plus the DISK geom itself under "geom"), or an empty dict when no
    DISK geom exists for `name`.
    """
    if name.startswith("/dev/"):
        # Typo fix: the original log message read "Tryiing", inconsistent
        # with the "Trying" message below.
        LogIt("Trying geom_by_name(DEV, {})".format(name[5:]))
        # Resolve /dev/xxx to the real DISK geom name.
        # NOTE(review): DiskRealName may return None here (see the Disk
        # constructor's handling) — confirm callers tolerate the resulting
        # empty-dict return.
        name = DiskRealName(geom.geom_by_name("DEV", name[5:]))
    LogIt("Trying geom_by_name(DISK, {})".format(name))
    disk = geom.geom_by_name("DISK", name)
    if disk:
        return {
            "name": name,
            "size": disk.provider.mediasize,
            "description": disk.provider.description,
            "geom": disk,
        }
    else:
        return {}
async def sync(self, name): """ Syncs a disk `name` with the database cache. """ # Skip sync disks on backup node if ( not await self.middleware.call('system.is_freenas') and await self.middleware.call('notifier.failover_licensed') and await self.middleware.call('notifier.failover_status') == 'BACKUP' ): return # Do not sync geom classes like multipath/hast/etc if name.find("/") != -1: return disks = list((await self.middleware.call('device.get_info', 'DISK')).keys()) # Abort if the disk is not recognized as an available disk if name not in disks: return ident = await self.device_to_identifier(name) qs = await self.middleware.call('datastore.query', 'storage.disk', [('disk_identifier', '=', ident)], {'order_by': ['disk_expiretime']}) if ident and qs: disk = qs[0] new = False else: new = True qs = await self.middleware.call('datastore.query', 'storage.disk', [('disk_name', '=', name)]) for i in qs: i['disk_expiretime'] = datetime.utcnow() + timedelta(days=DISK_EXPIRECACHE_DAYS) await self.middleware.call('datastore.update', 'storage.disk', i['disk_identifier'], i) disk = {'disk_identifier': ident} disk.update({'disk_name': name, 'disk_expiretime': None}) await self.middleware.threaded(geom.scan) g = geom.geom_by_name('DISK', name) if g: if g.provider.config['ident']: disk['disk_serial'] = g.provider.config['ident'] if g.provider.mediasize: disk['disk_size'] = g.provider.mediasize if not disk.get('disk_serial'): disk['disk_serial'] = await self.serial_from_device(name) or '' reg = RE_DSKNAME.search(name) if reg: disk['disk_subsystem'] = reg.group(1) disk['disk_number'] = int(reg.group(2)) if not new: await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk) else: disk['disk_identifier'] = await self.middleware.call('datastore.insert', 'storage.disk', disk) # FIXME: use a truenas middleware plugin await self.middleware.call('notifier.sync_disk_extra', disk['disk_identifier'], False)
async def sync(self, name): """ Syncs a disk `name` with the database cache. """ # Skip sync disks on backup node if ( not await self.middleware.call('system.is_freenas') and await self.middleware.call('notifier.failover_licensed') and await self.middleware.call('notifier.failover_status') == 'BACKUP' ): return # Do not sync geom classes like multipath/hast/etc if name.find("/") != -1: return disks = list((await self.middleware.call('device.get_info', 'DISK')).keys()) # Abort if the disk is not recognized as an available disk if name not in disks: return ident = await self.device_to_identifier(name) qs = await self.middleware.call('datastore.query', 'storage.disk', [('disk_identifier', '=', ident)], {'order_by': ['disk_expiretime']}) if ident and qs: disk = qs[0] new = False else: new = True qs = await self.middleware.call('datastore.query', 'storage.disk', [('disk_name', '=', name)]) for i in qs: i['disk_expiretime'] = datetime.utcnow() + timedelta(days=DISK_EXPIRECACHE_DAYS) await self.middleware.call('datastore.update', 'storage.disk', i['disk_identifier'], i) disk = {'disk_identifier': ident} disk.update({'disk_name': name, 'disk_expiretime': None}) await self.middleware.run_in_thread(geom.scan) g = geom.geom_by_name('DISK', name) if g: if g.provider.config['ident']: disk['disk_serial'] = g.provider.config['ident'] if g.provider.mediasize: disk['disk_size'] = g.provider.mediasize if not disk.get('disk_serial'): disk['disk_serial'] = await self.serial_from_device(name) or '' reg = RE_DSKNAME.search(name) if reg: disk['disk_subsystem'] = reg.group(1) disk['disk_number'] = int(reg.group(2)) if not new: await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk) else: disk['disk_identifier'] = await self.middleware.call('datastore.insert', 'storage.disk', disk) # FIXME: use a truenas middleware plugin await self.middleware.call('notifier.sync_disk_extra', disk['disk_identifier'], False)
def get_disks(self, name):
    """
    Yield the DISK geom names (or multipath/* names) backing the devices
    of `name`, as reported by self.get_devices().
    """
    devices = self.get_devices(name)
    geom.scan()
    labelclass = geom.class_by_name('LABEL')
    for dev in devices:
        # Strip any geli suffix to get the plain provider name.
        dev = dev.replace('.eli', '')
        # A device may be a label; chase it down to the real provider.
        matches = labelclass.xml.findall(
            f".//provider[name='{dev}']/../consumer/provider")
        if matches:
            disk_name = geom.provider_by_id(matches[0].get('ref')).geom.name
        else:
            disk_name = None
            dev_geom = geom.geom_by_name('DEV', dev)
            if dev_geom:
                disk_name = dev_geom.consumer.provider.geom.name
        # Accept real DISK geoms and multipath pseudo-devices.
        if disk_name and (disk_name.startswith('multipath/')
                          or geom.geom_by_name('DISK', disk_name)):
            yield disk_name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
def device_to_identifier(name, serial=None):
    """
    Map a disk device name (e.g. "da0") to a stable "type:value" identifier.

    Preference order: lunid -> serial (if supplied) -> partition rawuuid
    (freebsd-zfs / freebsd-ufs) -> glabel name -> plain device path.
    Returns None when no DISK geom exists for `name`.
    """
    disk_geom = geom.geom_by_name("DISK", name)
    if not disk_geom:
        return None

    disk_config = disk_geom.provider.config
    if "lunid" in disk_config:
        return "lunid:{0}".format(disk_config["lunid"])

    if serial:
        return "serial:{0}".format(serial)

    part_geom = geom.geom_by_name("PART", name)
    if part_geom:
        # First partition of a recognized filesystem type wins.
        match = next(
            (p for p in part_geom.providers
             if p.config["type"] in ("freebsd-zfs", "freebsd-ufs")),
            None)
        if match is not None:
            return "uuid:{0}".format(match.config["rawuuid"])

    label_geom = geom.geom_by_name("LABEL", name)
    if label_geom and label_geom.provider:
        return "label:{0}".format(label_geom.provider.name)

    # Nothing better: fall back to the device node path.
    return "devicename:{0}".format(os.path.join("/dev", name))
def device_to_identifier(name, serial=None):
    """
    Map a disk device name (e.g. "da0") to a stable "type:value" identifier.

    Preference order: lunid -> serial (if supplied) -> partition rawuuid
    (freebsd-zfs / freebsd-ufs) -> glabel name -> plain device path.
    Returns None when no DISK geom exists for `name`.
    """
    gdisk = geom.geom_by_name('DISK', name)
    if not gdisk:
        return None
    if 'lunid' in gdisk.provider.config:
        return "lunid:{0}".format(gdisk.provider.config['lunid'])
    if serial:
        return "serial:{0}".format(serial)
    gpart = geom.geom_by_name('PART', name)
    if gpart:
        # First partition of a recognized filesystem type wins.
        for i in gpart.providers:
            if i.config['type'] in ('freebsd-zfs', 'freebsd-ufs'):
                return "uuid:{0}".format(i.config['rawuuid'])
    glabel = geom.geom_by_name('LABEL', name)
    if glabel and glabel.provider:
        return "label:{0}".format(glabel.provider.name)
    # Nothing better: fall back to the device node path.
    return "devicename:{0}".format(os.path.join('/dev', name))
async def swaps_remove_disks(self, disks):
    """
    Remove a given disk (e.g. ["da0", "da1"]) from swap.
    it will offline if from swap, remove it from the gmirror (if exists)
    and detach the geli.
    """
    await self.middleware.run_in_thread(geom.scan)
    providers = {}
    for disk in disks:
        partgeom = geom.geom_by_name('PART', disk)
        if not partgeom:
            continue
        for p in partgeom.providers:
            # freebsd-swap GPT partition rawtype
            if p.config[
                'rawtype'
            ] == '516e7cb5-6ecf-11d6-8ff8-00022d09712b':
                providers[p.id] = p
                break
    if not providers:
        return

    klass = geom.class_by_name('MIRROR')
    if not klass:
        return

    # Find the gmirrors consuming any of the swap partitions found above.
    # Matched providers are removed from the dict so that only bare
    # (non-mirrored) swap partitions remain in it afterwards.
    mirrors = set()
    for g in klass.geoms:
        for c in g.consumers:
            if c.provider.id in providers:
                mirrors.add(g.name)
                del providers[c.provider.id]

    swapinfo_devs = [s.devname for s in getswapinfo()]

    # Mirrored swap: swapoff -> geli detach -> destroy the mirror.
    for name in mirrors:
        devname = f'mirror/{name}.eli'
        devpath = f'/dev/{devname}'
        if devname in swapinfo_devs:
            await run('swapoff', devpath)
        if os.path.exists(devpath):
            await run('geli', 'detach', devname)
        await run('gmirror', 'destroy', name)

    # Remaining non-mirrored encrypted swap providers.
    for p in providers.values():
        devname = f'{p.name}.eli'
        if devname in swapinfo_devs:
            await run('swapoff', f'/dev/{devname}')
        if os.path.exists(f'/dev/{devname}'):
            await run('geli', 'detach', devname)
async def get_disks(self, name):
    """
    Yield the DISK geom names of the disks backing ZFS pool `name`.

    Raises:
        CallError: (errno.ENOENT) when libzfs cannot open the pool.
    """
    zfs = libzfs.ZFS()
    try:
        zpool = zfs.get(name)
    except libzfs.ZFSException as e:
        raise CallError(str(e), errno.ENOENT)

    # geom.scan() blocks; run it off the event loop.
    await self.middleware.threaded(geom.scan)
    labelclass = geom.class_by_name('LABEL')
    for absdev in zpool.disks:
        # Plain provider name: no /dev/ prefix, no geli suffix.
        dev = absdev.replace('/dev/', '').replace('.eli', '')
        # A pool member may be a label; chase it to the real provider.
        find = labelclass.xml.findall(
            f".//provider[name='{dev}']/../consumer/provider")
        name = None
        if find:
            name = geom.provider_by_id(find[0].get('ref')).geom.name
        else:
            g = geom.geom_by_name('DEV', dev)
            if g:
                name = g.consumer.provider.geom.name
        if name and geom.geom_by_name('DISK', name):
            yield name
        else:
            self.logger.debug(f'Could not find disk for {dev}')
async def serial_from_device(self, name):
    """
    Return the serial number of device `name`, or None.

    Tries smartctl first; falls back to the 'ident' field of the DISK
    geom config.
    """
    args = await self.__get_smartctl_args(name)
    if args:
        p1 = await Popen(['smartctl', '-i'] + args, stdout=subprocess.PIPE)
        output = (await p1.communicate())[0].decode()
        search = re.search(r'Serial Number:\s+(?P<serial>.+)', output, re.I)
        if search:
            return search.group('serial')

    # smartctl did not yield a serial: fall back to GEOM (blocking scan
    # runs off the event loop).
    await self.middleware.run_in_thread(geom.scan)
    g = geom.geom_by_name('DISK', name)
    if g and g.provider.config.get('ident'):
        return g.provider.config['ident']

    return None
def generate_disk_cache(dispatcher, path):
    """
    Build and store the disk-cache entry for the device node at `path`,
    attaching to (or creating) a multipath configuration when the disk's
    NAA ID indicates one, then trigger disk configuration.
    """
    # FIX: hold the lock with try/finally — the original's release() was
    # skipped when anything below raised, leaving the lock held forever.
    diskinfo_cache_lock.acquire()
    try:
        geom.scan()
        name = os.path.basename(path)
        gdisk = geom.geom_by_name('DISK', name)
        multipath_info = None

        disk_info = info_from_device(gdisk.name)
        serial = disk_info['serial']
        identifier = device_to_identifier(name, serial)
        ds_disk = dispatcher.datastore.get_by_id('disks', identifier)

        # Path repesents disk device (not multipath device) and has NAA ID attached
        lunid = gdisk.provider.config.get('lunid')
        if lunid:
            # Check if device could be part of multipath configuration
            d = get_disk_by_lunid(lunid)
            if (d and d['path'] != path) or (ds_disk and ds_disk['is_multipath']):
                multipath_info = attach_to_multipath(dispatcher, d, ds_disk, path)

        provider = gdisk.provider
        try:
            camdev = CamDevice(gdisk.name)
        except RuntimeError:
            camdev = None

        disk = wrap({
            'path': path,
            'is_multipath': False,
            'description': provider.config['descr'],
            'serial': serial,
            'lunid': provider.config.get('lunid'),
            'model': disk_info['model'],
            'interface': disk_info['interface'],
            'is_ssd': disk_info['is_ssd'],
            'id': identifier,
            'controller': camdev.__getstate__() if camdev else None,
        })

        if multipath_info:
            disk.update(multipath_info)

        diskinfo_cache.put(identifier, disk)
        update_disk_cache(dispatcher, path)
        dispatcher.call_sync('disks.configure_disk', identifier)

        logger.info('Added <%s> (%s) to disk cache', identifier, disk['description'])
    finally:
        diskinfo_cache_lock.release()
async def swaps_remove_disks(self, disks):
    """
    Remove a given disk (e.g. ["da0", "da1"]) from swap.
    it will offline if from swap, remove it from the gmirror (if exists)
    and detach the geli.
    """
    await self.middleware.run_in_thread(geom.scan)
    providers = {}
    for disk in disks:
        partgeom = geom.geom_by_name('PART', disk)
        if not partgeom:
            continue
        for p in partgeom.providers:
            # freebsd-swap GPT partition rawtype
            if p.config['rawtype'] == '516e7cb5-6ecf-11d6-8ff8-00022d09712b':
                providers[p.id] = p
                break
    if not providers:
        return

    klass = geom.class_by_name('MIRROR')
    if not klass:
        return

    # Find the gmirrors consuming any of the swap partitions found above.
    # Matched providers are removed from the dict so that only bare
    # (non-mirrored) swap partitions remain in it afterwards.
    mirrors = set()
    for g in klass.geoms:
        for c in g.consumers:
            if c.provider.id in providers:
                mirrors.add(g.name)
                del providers[c.provider.id]

    swapinfo_devs = [s.devname for s in getswapinfo()]

    # Mirrored swap: swapoff -> geli detach -> destroy the mirror.
    for name in mirrors:
        devname = f'mirror/{name}.eli'
        devpath = f'/dev/{devname}'
        if devname in swapinfo_devs:
            await run('swapoff', devpath)
        if os.path.exists(devpath):
            await run('geli', 'detach', devname)
        await run('gmirror', 'destroy', name)

    # Remaining non-mirrored encrypted swap providers.
    for p in providers.values():
        devname = f'{p.name}.eli'
        if devname in swapinfo_devs:
            await run('swapoff', f'/dev/{devname}')
        if os.path.exists(f'/dev/{devname}'):
            await run('geli', 'detach', devname)
def generate_disk_cache(dispatcher, path):
    """
    Build and store the disk-cache entry for the device node at `path`,
    attaching to (or creating) a multipath configuration when the disk's
    NAA ID indicates one, then trigger disk configuration.
    """
    # FIX: hold the lock with try/finally — the original's release() was
    # skipped when anything below raised, leaving the lock held forever.
    diskinfo_cache_lock.acquire()
    try:
        geom.scan()
        name = os.path.basename(path)
        gdisk = geom.geom_by_name('DISK', name)
        multipath_info = None

        disk_info = info_from_device(gdisk.name)
        serial = disk_info['serial']
        identifier = device_to_identifier(name, serial)
        ds_disk = dispatcher.datastore.get_by_id('disks', identifier)

        # Path repesents disk device (not multipath device) and has NAA ID attached
        lunid = gdisk.provider.config.get('lunid')
        if lunid:
            # Check if device could be part of multipath configuration
            d = get_disk_by_lunid(lunid)
            if (d and d['path'] != path) or (ds_disk and ds_disk['is_multipath']):
                multipath_info = attach_to_multipath(dispatcher, d, ds_disk, path)

        provider = gdisk.provider
        # FIX: guard CamDevice like the sibling variant of this function
        # does — an unguarded RuntimeError here would abort the cache
        # update entirely for disks without a CAM device.
        try:
            camdev = CamDevice(gdisk.name)
        except RuntimeError:
            camdev = None

        disk = wrap({
            'path': path,
            'is_multipath': False,
            'description': provider.config['descr'],
            'serial': serial,
            'lunid': provider.config.get('lunid'),
            'model': disk_info['model'],
            'interface': disk_info['interface'],
            'is_ssd': disk_info['is_ssd'],
            'id': identifier,
            'controller': camdev.__getstate__() if camdev else None,
        })

        if multipath_info:
            disk.update(multipath_info)

        diskinfo_cache.put(identifier, disk)
        update_disk_cache(dispatcher, path)
        dispatcher.call_sync('disks.configure_disk', identifier)

        logger.info('Added <%s> (%s) to disk cache', identifier, disk['description'])
    finally:
        diskinfo_cache_lock.release()
async def sync_all(self):
    """
    Synchronyze all disks with the cache in database.
    """
    # Skip sync disks on backup node
    if (not await self.middleware.call('system.is_freenas') and
            await self.middleware.call('notifier.failover_licensed') and
            await self.middleware.call('notifier.failover_status') == 'BACKUP'):
        return

    sys_disks = list((await self.middleware.call('device.get_info', 'DISK')).keys())

    seen_disks = {}
    serials = []
    await self.middleware.threaded(geom.scan)
    # Pass 1: walk the existing database rows, refreshing or expiring them.
    for disk in (await self.middleware.call('datastore.query', 'storage.disk', [], {'order_by': ['disk_expiretime']})):
        name = await self.middleware.call('notifier.identifier_to_device', disk['disk_identifier'])
        if not name or name in seen_disks:
            # If we cant translate the indentifier to a device, give up
            # If name has already been seen once then we are probably
            # dealing with with multipath here
            if not disk['disk_expiretime']:
                disk['disk_expiretime'] = datetime.utcnow() + timedelta(
                    days=DISK_EXPIRECACHE_DAYS)
                await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)
            elif disk['disk_expiretime'] < datetime.utcnow():
                # Disk expire time has surpassed, go ahead and remove it
                await self.middleware.call('datastore.delete', 'storage.disk', disk['disk_identifier'])
            continue
        else:
            disk['disk_expiretime'] = None
            disk['disk_name'] = name

        reg = RE_DSKNAME.search(name)
        if reg:
            disk['disk_subsystem'] = reg.group(1)
            disk['disk_number'] = int(reg.group(2))
        serial = ''
        g = geom.geom_by_name('DISK', name)
        if g:
            if g.provider.config['ident']:
                serial = disk['disk_serial'] = g.provider.config['ident']
            serial += g.provider.config.get('lunid') or ''
            if g.provider.mediasize:
                disk['disk_size'] = g.provider.mediasize
        if not disk.get('disk_serial'):
            serial = disk['disk_serial'] = await self.serial_from_device(
                name) or ''
        if serial:
            serials.append(serial)

        # If for some reason disk is not identified as a system disk
        # mark it to expire.
        if name not in sys_disks and not disk['disk_expiretime']:
            disk['disk_expiretime'] = datetime.utcnow() + timedelta(
                days=DISK_EXPIRECACHE_DAYS)
        await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)

        # FIXME: use a truenas middleware plugin
        await self.middleware.call('notifier.sync_disk_extra', disk['disk_identifier'], False)
        seen_disks[name] = disk

    # Pass 2: insert (or revive) rows for system disks we did not see
    # in the database walk above.
    for name in sys_disks:
        if name not in seen_disks:
            disk_identifier = await self.device_to_identifier(name)
            qs = await self.middleware.call(
                'datastore.query', 'storage.disk',
                [('disk_identifier', '=', disk_identifier)])
            if qs:
                new = False
                disk = qs[0]
            else:
                new = True
                disk = {'disk_identifier': disk_identifier}
            disk['disk_name'] = name
            serial = ''
            g = geom.geom_by_name('DISK', name)
            if g:
                if g.provider.config['ident']:
                    serial = disk['disk_serial'] = g.provider.config[
                        'ident']
                serial += g.provider.config.get('lunid') or ''
                if g.provider.mediasize:
                    disk['disk_size'] = g.provider.mediasize
            if not disk.get('disk_serial'):
                serial = disk[
                    'disk_serial'] = await self.serial_from_device(name
                                                                   ) or ''
            if serial:
                if serial in serials:
                    # Probably dealing with multipath here, do not add another
                    continue
                else:
                    serials.append(serial)
            reg = RE_DSKNAME.search(name)
            if reg:
                disk['disk_subsystem'] = reg.group(1)
                disk['disk_number'] = int(reg.group(2))
            if not new:
                await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)
            else:
                disk['disk_identifier'] = await self.middleware.call(
                    'datastore.insert', 'storage.disk', disk)
            # FIXME: use a truenas middleware plugin
            await self.middleware.call('notifier.sync_disk_extra', disk['disk_identifier'], True)
def update_disk_cache(dispatcher, path):
    """
    Refresh the cached entry for the disk (or multipath device) at `path`
    with current geometry, partition, SMART, and multipath information,
    then persist it.
    """
    geom.scan()
    name = os.path.basename(path)
    gdisk = geom.geom_by_name('DISK', name)
    gpart = geom.geom_by_name('PART', name)
    gmultipath = geom.geom_by_name('MULTIPATH', path.split('/')[-1])
    disk = get_disk_by_path(path)
    if not disk:
        return

    old_id = disk['id']

    if gmultipath:
        # Path represents multipath device (not disk device)
        # MEDIACHANGE event -> use first member for hardware queries
        # NOTE(review): .next() looks Python-2-style; confirm the geom
        # consumers object actually exposes it (vs next(...)/indexing).
        cons = gmultipath.consumers.next()
        gdisk = cons.provider.geom

    if not gdisk:
        return

    disk_info = info_from_device(gdisk.name)
    serial = disk_info['serial']
    provider = gdisk.provider
    partitions = list(generate_partitions_list(gpart))
    identifier = device_to_identifier(name, serial)
    # Locate the ZFS data and swap partitions (if present) for gptid paths.
    data_part = first_or_default(lambda x: x['type'] == 'freebsd-zfs', partitions)
    data_uuid = data_part["uuid"] if data_part else None
    swap_part = first_or_default(lambda x: x['type'] == 'freebsd-swap', partitions)
    swap_uuid = swap_part["uuid"] if swap_part else None

    disk.update({
        'mediasize': provider.mediasize,
        'sectorsize': provider.sectorsize,
        'max_rotation': disk_info['max_rotation'],
        'smart_capable': disk_info['smart_capable'],
        'smart_enabled': disk_info['smart_enabled'],
        'smart_status': disk_info['smart_status'],
        'id': identifier,
        'schema': gpart.config.get('scheme') if gpart else None,
        'partitions': partitions,
        'data_partition_uuid': data_uuid,
        'data_partition_path': os.path.join("/dev/gptid", data_uuid) if data_uuid else None,
        'swap_partition_uuid': swap_uuid,
        'swap_partition_path': os.path.join("/dev/gptid", swap_uuid) if swap_uuid else None,
    })

    if gmultipath:
        disk['multipath.status'] = gmultipath.config['State']
        disk['multipath.mode'] = gmultipath.config['Mode']
        disk['multipath.uuid'] = gmultipath.config['UUID']

    # Purge old cache entry if identifier has changed
    if old_id != identifier:
        logger.debug('Removing disk cache entry for <%s> because identifier changed', old_id)
        diskinfo_cache.remove(old_id)
        diskinfo_cache.put(identifier, disk)
        dispatcher.datastore.delete('disks', old_id)

    persist_disk(dispatcher, disk)
async def sync_all(self):
    """
    Synchronyze all disks with the cache in database.
    """
    # Skip sync disks on backup node
    if (
        not await self.middleware.call('system.is_freenas') and
        await self.middleware.call('notifier.failover_licensed') and
        await self.middleware.call('notifier.failover_status') == 'BACKUP'
    ):
        return

    sys_disks = list((await self.middleware.call('device.get_info', 'DISK')).keys())

    seen_disks = {}
    serials = []
    await self.middleware.threaded(geom.scan)
    # Pass 1: walk the existing database rows, refreshing or expiring them.
    for disk in (await self.middleware.call('datastore.query', 'storage.disk', [], {'order_by': ['disk_expiretime']})):
        name = await self.middleware.call('notifier.identifier_to_device', disk['disk_identifier'])
        if not name or name in seen_disks:
            # If we cant translate the indentifier to a device, give up
            # If name has already been seen once then we are probably
            # dealing with with multipath here
            if not disk['disk_expiretime']:
                disk['disk_expiretime'] = datetime.utcnow() + timedelta(days=DISK_EXPIRECACHE_DAYS)
                await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)
            elif disk['disk_expiretime'] < datetime.utcnow():
                # Disk expire time has surpassed, go ahead and remove it
                await self.middleware.call('datastore.delete', 'storage.disk', disk['disk_identifier'])
            continue
        else:
            disk['disk_expiretime'] = None
            disk['disk_name'] = name

        reg = RE_DSKNAME.search(name)
        if reg:
            disk['disk_subsystem'] = reg.group(1)
            disk['disk_number'] = int(reg.group(2))
        serial = ''
        g = geom.geom_by_name('DISK', name)
        if g:
            if g.provider.config['ident']:
                serial = disk['disk_serial'] = g.provider.config['ident']
            serial += g.provider.config.get('lunid') or ''
            if g.provider.mediasize:
                disk['disk_size'] = g.provider.mediasize
        if not disk.get('disk_serial'):
            serial = disk['disk_serial'] = await self.serial_from_device(name) or ''
        if serial:
            serials.append(serial)

        # If for some reason disk is not identified as a system disk
        # mark it to expire.
        if name not in sys_disks and not disk['disk_expiretime']:
            disk['disk_expiretime'] = datetime.utcnow() + timedelta(days=DISK_EXPIRECACHE_DAYS)
        await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)

        # FIXME: use a truenas middleware plugin
        await self.middleware.call('notifier.sync_disk_extra', disk['disk_identifier'], False)
        seen_disks[name] = disk

    # Pass 2: insert (or revive) rows for system disks we did not see
    # in the database walk above.
    for name in sys_disks:
        if name not in seen_disks:
            disk_identifier = await self.device_to_identifier(name)
            qs = await self.middleware.call('datastore.query', 'storage.disk', [('disk_identifier', '=', disk_identifier)])
            if qs:
                new = False
                disk = qs[0]
            else:
                new = True
                disk = {'disk_identifier': disk_identifier}
            disk['disk_name'] = name
            serial = ''
            g = geom.geom_by_name('DISK', name)
            if g:
                if g.provider.config['ident']:
                    serial = disk['disk_serial'] = g.provider.config['ident']
                serial += g.provider.config.get('lunid') or ''
                if g.provider.mediasize:
                    disk['disk_size'] = g.provider.mediasize
            if not disk.get('disk_serial'):
                serial = disk['disk_serial'] = await self.serial_from_device(name) or ''
            if serial:
                if serial in serials:
                    # Probably dealing with multipath here, do not add another
                    continue
                else:
                    serials.append(serial)
            reg = RE_DSKNAME.search(name)
            if reg:
                disk['disk_subsystem'] = reg.group(1)
                disk['disk_number'] = int(reg.group(2))
            if not new:
                await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)
            else:
                disk['disk_identifier'] = await self.middleware.call('datastore.insert', 'storage.disk', disk)
            # FIXME: use a truenas middleware plugin
            await self.middleware.call('notifier.sync_disk_extra', disk['disk_identifier'], True)
def update_disk_cache(dispatcher, path):
    """
    Refresh the cached entry for the disk (or multipath device) at `path`
    with current geometry, partition, SMART, and multipath information,
    then persist it.
    """
    geom.scan()
    name = os.path.basename(path)
    gdisk = geom.geom_by_name('DISK', name)
    gpart = geom.geom_by_name('PART', name)
    gmultipath = geom.geom_by_name('MULTIPATH', path.split('/')[-1])
    disk = get_disk_by_path(path)
    if not disk:
        return

    old_id = disk['id']

    if gmultipath:
        # Path represents multipath device (not disk device)
        # MEDIACHANGE event -> use first member for hardware queries
        # NOTE(review): .next() looks Python-2-style; confirm the geom
        # consumers object actually exposes it (vs next(...)/indexing).
        cons = gmultipath.consumers.next()
        gdisk = cons.provider.geom

    if not gdisk:
        return

    disk_info = info_from_device(gdisk.name)
    serial = disk_info['serial']
    provider = gdisk.provider
    partitions = list(generate_partitions_list(gpart))
    identifier = device_to_identifier(name, serial)
    # Locate the ZFS data and swap partitions (if present) for gptid paths.
    data_part = first_or_default(lambda x: x['type'] == 'freebsd-zfs', partitions)
    data_uuid = data_part["uuid"] if data_part else None
    swap_part = first_or_default(lambda x: x['type'] == 'freebsd-swap', partitions)
    swap_uuid = swap_part["uuid"] if swap_part else None

    disk.update({
        'mediasize': provider.mediasize,
        'sectorsize': provider.sectorsize,
        'max_rotation': disk_info['max_rotation'],
        'smart_capable': disk_info['smart_capable'],
        'smart_enabled': disk_info['smart_enabled'],
        'smart_status': disk_info['smart_status'],
        'id': identifier,
        'schema': gpart.config.get('scheme') if gpart else None,
        'partitions': partitions,
        'data_partition_uuid': data_uuid,
        'data_partition_path': os.path.join("/dev/gptid", data_uuid) if data_uuid else None,
        'swap_partition_uuid': swap_uuid,
        'swap_partition_path': os.path.join("/dev/gptid", swap_uuid) if swap_uuid else None,
    })

    if gmultipath:
        disk['multipath.status'] = gmultipath.config['State']
        disk['multipath.mode'] = gmultipath.config['Mode']
        disk['multipath.uuid'] = gmultipath.config['UUID']

    # Purge old cache entry if identifier has changed
    if old_id != identifier:
        logger.debug(
            'Removing disk cache entry for <%s> because identifier changed',
            old_id)
        diskinfo_cache.remove(old_id)
        diskinfo_cache.put(identifier, disk)
        dispatcher.datastore.delete('disks', old_id)

    persist_disk(dispatcher, disk)
def attach_to_multipath(dispatcher, disk, ds_disk, path):
    """
    Attach device node `path` to a multipath configuration.

    Three cases:
      - no live `disk` but a datastore record marked multipath: rebuild a
        degenerated single-disk multipath under the recorded node name;
      - `path` is another path to a known `disk`: add it to the existing
        multipath node, or create a new node from both paths;
      - otherwise nothing is created.

    Returns a dict with is_multipath/path/multipath keys, or None when
    the gmultipath operation fails or the path was already a member.
    """
    if not disk and ds_disk:
        logger.info("Device node %s <%s> is marked as multipath, creating single-node multipath", path, ds_disk['serial'])
        nodename = os.path.basename(ds_disk['path'])
        logger.info('Reusing %s path', nodename)

        # Degenerated single-disk multipath
        try:
            dispatcher.exec_and_wait_for_event(
                'system.device.attached',
                lambda args: args['path'] == '/dev/multipath/{0}'.format(nodename),
                lambda: system('/sbin/gmultipath', 'create', nodename, path)
            )
        except SubprocessException as e:
            logger.warning('Cannot create multipath: {0}'.format(e.err))
            return

        ret = {
            'is_multipath': True,
            'path': os.path.join('/dev/multipath', nodename),
        }
    elif disk:
        logger.info("Device node %s is another path to disk <%s> (%s)", path, disk['id'], disk['description'])
        if disk['is_multipath']:
            if path in disk['multipath.members']:
                # Already added
                return

            # Attach new disk
            try:
                system('/sbin/gmultipath', 'add', disk['multipath.node'], path)
            except SubprocessException as e:
                # FIX: the original format string repeated {0}, printing the
                # path twice and never showing the error; use {1} for e.err.
                logger.warning('Cannot attach {0} to multipath: {1}'.format(path, e.err))
                return

            nodename = disk['multipath.node']
            ret = {
                'is_multipath': True,
                'path': os.path.join('/dev/multipath', disk['multipath.node']),
            }
        else:
            # Create new multipath
            logger.info('Creating new multipath device')

            # If disk was previously tied to specific cdev path (/dev/multipath[0-9]+)
            # reuse that path. Otherwise pick up first multipath device name available
            if ds_disk and ds_disk['is_multipath']:
                nodename = os.path.basename(ds_disk['path'])
                logger.info('Reusing %s path', nodename)
            else:
                nodename = get_multipath_name()
                logger.info('Using new %s path', nodename)

            try:
                dispatcher.exec_and_wait_for_event(
                    'system.device.attached',
                    lambda args: args['path'] == '/dev/multipath/{0}'.format(nodename),
                    lambda: system('/sbin/gmultipath', 'create', nodename, disk['path'], path)
                )
            except SubprocessException as e:
                logger.warning('Cannot create multipath: {0}'.format(e.err))
                return

            ret = {
                'is_multipath': True,
                'path': os.path.join('/dev/multipath', nodename),
            }

    # NOTE(review): if both `disk` and `ds_disk` are falsy, `ret` and
    # `nodename` are unbound here and this raises NameError — confirm
    # callers never invoke this function in that state.
    geom.scan()
    gmultipath = geom.geom_by_name('MULTIPATH', nodename)
    ret['multipath'] = generate_multipath_info(gmultipath)
    return ret