def _scan(self):
    disks = scan_disks(settings.MIN_DISK_SIZE)
    for d in disks:
        dob = None
        if (Disk.objects.filter(name=d.name).exists()):
            dob = Disk.objects.get(name=d.name)
            dob.serial = d.serial
        elif (Disk.objects.filter(serial=d.serial).exists()):
            dob = Disk.objects.get(serial=d.serial)
            dob.name = d.name
        else:
            new_disk = Disk(name=d.name, size=d.size, parted=d.parted,
                            btrfs_uuid=d.btrfs_uuid, model=d.model,
                            serial=d.serial, transport=d.transport,
                            vendor=d.vendor)
            new_disk.save()
        if (dob is not None):
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            if (Pool.objects.filter(name=d.label).exists()):
                dob.pool = Pool.objects.get(name=d.label)
            dob.save()
    for do in Disk.objects.all():
        if (do.name not in [d.name for d in disks]):
            do.offline = True
            do.save()
    ds = DiskInfoSerializer(Disk.objects.all().order_by('name'), many=True)
    return Response(ds.data)
def _update_disk_state():
    disks = scan_disks(settings.MIN_DISK_SIZE)
    for d in disks:
        dob = None
        if (Disk.objects.filter(name=d.name).exists()):
            dob = Disk.objects.get(name=d.name)
            dob.serial = d.serial
        elif (Disk.objects.filter(serial=d.serial).exists()):
            dob = Disk.objects.get(serial=d.serial)
            dob.name = d.name
        else:
            dob = Disk(name=d.name, size=d.size, parted=d.parted,
                       btrfs_uuid=d.btrfs_uuid, model=d.model,
                       serial=d.serial, transport=d.transport,
                       vendor=d.vendor)
        dob.size = d.size
        dob.parted = d.parted
        dob.offline = False
        dob.model = d.model
        dob.transport = d.transport
        dob.vendor = d.vendor
        dob.btrfs_uuid = d.btrfs_uuid
        if (d.fstype is not None and d.fstype != 'btrfs'):
            dob.btrfs_uuid = None
            dob.parted = True
        if (Pool.objects.filter(name=d.label).exists()):
            dob.pool = Pool.objects.get(name=d.label)
        else:
            dob.pool = None
        if (dob.pool is None and d.root is True):
            p = Pool(name=settings.ROOT_POOL, raid='single')
            p.disk_set.add(dob)
            p.save()
            dob.pool = p
            dob.save()
            p.size = pool_usage(mount_root(p))[0]
            enable_quota(p)
            p.uuid = btrfs_uuid(dob.name)
            p.save()
        dob.save()
    for do in Disk.objects.all():
        if (do.name not in [d.name for d in disks]):
            do.offline = True
        else:
            try:
                # for non ata/sata drives
                do.smart_available, do.smart_enabled = smart.available(do.name)
            except Exception, e:
                logger.exception(e)
                do.smart_available = do.smart_enabled = False
        do.save()
def btrfs_disk_scan():
    data = scan_disks(MIN_DISK_SIZE)
    # print data
    disks = []
    for d in data:
        dob = {}
        if d.name == "centos-root":
            continue
        dob["name"] = d.name
        dob["size"] = d.size
        dob["parted"] = d.parted
        dob["offline"] = False
        dob["model"] = d.model
        dob["transport"] = d.transport
        dob["vendor"] = d.vendor
        dob["btrfs_uuid"] = d.btrfs_uuid
        if (d.fstype is not None and d.fstype != 'btrfs'):
            dob["btrfs_uuid"] = None
            dob["parted"] = True
        if d.fstype == 'isw_raid_member' \
                or d.fstype == 'linux_raid_member':
            dob["role"] = d.fstype
        else:
            dob["role"] = None
        if (d.root is True):
            dob["role"] = 'root'
        dob["pool"] = d.label
        dob["serial"] = d.serial
        disks.append(dob)
    for do in disks:
        # print "for"
        # if (not do["offline"]):
        if (re.match("vd|md|mmcblk", do["name"]) is not None):
            do["smart_available"] = False
            do["smart_enabled"] = False
            continue
        try:
            do["smart_available"], do["smart_enabled"] = smart.available(
                do["name"], "")
        except Exception, e:
            # logger.exception(e)
            do["smart_available"] = False
            do["smart_enabled"] = False
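# A minimal standalone sketch (not part of the original module) of the
# device-name screen used above before calling smart.available(): virtio
# (vd*), mdraid (md*) and sdcard-reader (mmcblk*) devices are assumed to
# lack S.M.A.R.T support, so they are skipped rather than risking a logged
# exception. The helper name is_smart_candidate() is hypothetical.
import re


def is_smart_candidate(dev_name):
    """Return False for device names that should skip S.M.A.R.T probing."""
    return re.match("vd|md|mmcblk", dev_name) is None


# Example: only sda would be probed; vda, md0 and mmcblk0 are skipped.
# [is_smart_candidate(n) for n in ("sda", "vda", "md0", "mmcblk0")]
# -> [True, False, False, False]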
def _scan(self):
    disks = scan_disks(settings.MIN_DISK_SIZE)
    for d in disks:
        dob = None
        if (Disk.objects.filter(name=d.name).exists()):
            dob = Disk.objects.get(name=d.name)
            dob.serial = d.serial
        elif (Disk.objects.filter(serial=d.serial).exists()):
            dob = Disk.objects.get(serial=d.serial)
            dob.name = d.name
        else:
            dob = Disk(name=d.name, size=d.size, parted=d.parted,
                       btrfs_uuid=d.btrfs_uuid, model=d.model,
                       serial=d.serial, transport=d.transport,
                       vendor=d.vendor)
        dob.size = d.size
        dob.parted = d.parted
        dob.offline = False
        dob.model = d.model
        dob.transport = d.transport
        dob.vendor = d.vendor
        dob.btrfs_uuid = d.btrfs_uuid
        if (d.fstype is not None and d.fstype != 'btrfs'):
            dob.btrfs_uuid = None
            dob.parted = True
        if (Pool.objects.filter(name=d.label).exists()):
            dob.pool = Pool.objects.get(name=d.label)
        else:
            dob.pool = None
        if (dob.pool is None and d.root is True):
            self._create_root_pool(dob)
        dob.save()
    for do in Disk.objects.all():
        if (do.name not in [d.name for d in disks]):
            do.offline = True
        else:
            try:
                # for non ata/sata drives
                do.smart_available, do.smart_enabled = smart.available(do.name)
            except Exception, e:
                logger.exception(e)
                do.smart_available = do.smart_enabled = False
        do.save()
def _update_disk_state():
    """
    A db atomic method to update the database of attached disks / drives.
    Works only on device serial numbers for drive identification.
    Calls scan_disks to establish the current connected drives info.
    Initially removes duplicate by serial number db entries to deal
    with legacy db states and obfuscates all previous device names as they
    are transient. The drive database is then updated with the attached
    disks info and previously known drives no longer found attached are
    marked as offline. All offline drives have their SMART availability and
    activation status removed and all attached drives have their SMART
    availability assessed and activated if available.
    :return: serialized models of attached and missing disks via serial num
    """
    # Acquire a list (namedtuple collection) of attached drives > min size
    disks = scan_disks(settings.MIN_DISK_SIZE)
    serial_numbers_seen = []
    # Make sane our db entries in view of what we know we have attached.
    # Device serial number is the only known external unique entry; scan_disks
    # makes this so in the case of empty or repeat entries by providing
    # fake serial numbers which are in turn flagged via WebUI as unreliable.
    # 1) scrub all device names with unique but nonsense uuid4
    # 2) mark all offline disks as such via db flag
    # 3) mark all offline disks smart available and enabled flags as False
    logger.info('update_disk_state() Called')
    for do in Disk.objects.all():
        # Replace all device names with a unique placeholder on each scan
        # N.B. do not optimize by re-using uuid index as this could lead
        # to a non refreshed webui acting upon an entry that is different
        # from that shown to the user.
        do.name = str(uuid.uuid4()).replace('-', '')  # 32 chars long
        # Delete duplicate or fake by serial number db disk entries.
        # It makes no sense to save fake serial number drives between scans
        # as on each scan the serial number is re-generated anyway.
        if (do.serial in serial_numbers_seen) or (len(do.serial) == 48):
            logger.info('Deleting duplicate or fake (by serial) Disk db '
                        'entry. Serial = %s' % do.serial)
            do.delete()  # django >= 1.9 returns a dict of deleted items.
            # Continue onto next db disk object as nothing more to process.
            continue
        # first encounter of this serial in the db so stash it for reference
        serial_numbers_seen.append(deepcopy(do.serial))
        # Look for devices (by serial number) that are in the db but not in
        # our disk scan, ie offline / missing.
        if (do.serial not in [d.serial for d in disks]):
            # update the db entry as offline
            do.offline = True
            # disable S.M.A.R.T available and enabled flags.
            do.smart_available = do.smart_enabled = False
        do.save()  # make sure all updates are flushed to db
    # Our db now has no device name info as all dev names are place holders.
    # Iterate over attached drives to update the db's knowledge of them.
    # Kernel dev names are unique so safe to overwrite our db unique name.
    for d in disks:
        # start with an empty disk object
        dob = None
        # If the db has an entry with this disk's serial number then
        # use this db entry and update the device name from our recent scan.
        if (Disk.objects.filter(serial=d.serial).exists()):
            dob = Disk.objects.get(serial=d.serial)
            dob.name = d.name
        else:
            # We have an assumed new disk entry as no serial match in db.
            # Build a new entry for this disk.
            dob = Disk(name=d.name, serial=d.serial)
        # Update the db disk object (existing or new) with our scanned info
        dob.size = d.size
        dob.parted = d.parted
        dob.offline = False  # as we are iterating over attached devices
        dob.model = d.model
        dob.transport = d.transport
        dob.vendor = d.vendor
        dob.btrfs_uuid = d.btrfs_uuid
        # If attached disk has an fs and it isn't btrfs
        if (d.fstype is not None and d.fstype != 'btrfs'):
            dob.btrfs_uuid = None
            dob.parted = True
        # If our existing Pool db knows of this disk's pool via its label:
        if (Pool.objects.filter(name=d.label).exists()):
            # update the disk db object's pool field accordingly.
            dob.pool = Pool.objects.get(name=d.label)
        else:
            # this disk is not known to exist in any pool via its label
            dob.pool = None
        # If no pool has yet been found with this disk's label in, and
        # the attached disk is our root disk (flagged by scan_disks)
        if (dob.pool is None and d.root is True):
            # setup our special root disk db entry in Pool
            p = Pool(name=settings.ROOT_POOL, raid='single')
            p.disk_set.add(dob)
            p.save()
            # update disk db object to reflect special root pool status
            dob.pool = p
            dob.save()
            p.size = pool_usage(mount_root(p))[0]
            enable_quota(p)
            p.uuid = btrfs_uuid(dob.name)
            p.save()
        # save our updated db disk object
        dob.save()
    # Update online db entries with S.M.A.R.T availability and status.
    for do in Disk.objects.all():
        # find all the not offline db entries
        if (not do.offline):
            # We have an attached disk db entry
            if re.match('vd', do.name):
                # Virtio disks (named vd*) have no smart capability.
                # avoids cluttering logs with exceptions on these devices.
                do.smart_available = do.smart_enabled = False
                continue
            # try to establish smart availability and status and update db
            try:
                # for non ata/sata drives
                do.smart_available, do.smart_enabled = smart.available(
                    do.name)
            except Exception, e:
                logger.exception(e)
                do.smart_available = do.smart_enabled = False
        do.save()
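# An illustrative, standalone sketch (not from the original source) of the
# serial-number sanitation pass above, using plain dicts in place of Disk
# model instances. It mirrors the same two rules: every previously stored
# device name is replaced by a throwaway uuid4 placeholder, and entries whose
# serial repeats or is a 48 char fake are dropped; survivors are flagged
# offline when their serial is not in the attached set. The helper name
# scrub_disk_entries() is hypothetical.
import uuid


def scrub_disk_entries(entries, attached_serials):
    """Return surviving entries with placeholder names and offline flags set."""
    serial_numbers_seen = []
    survivors = []
    for entry in entries:
        # obfuscate the transient device name with a 32 char placeholder
        entry['name'] = str(uuid.uuid4()).replace('-', '')
        # drop duplicate-serial entries and 48 char fake serials
        if entry['serial'] in serial_numbers_seen or len(entry['serial']) == 48:
            continue
        serial_numbers_seen.append(entry['serial'])
        # mark entries whose serial was not found attached as offline
        entry['offline'] = entry['serial'] not in attached_serials
        survivors.append(entry)
    return survivors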
def _update_disk_state():
    """
    A db atomic method to update the database of attached disks / drives.
    Works only on device serial numbers for drive identification.
    Calls scan_disks to establish the current connected drives info.
    Initially removes duplicate by serial number db entries to deal
    with legacy db states and obfuscates all previous device names as they
    are transient. The drive database is then updated with the attached
    disks info and previously known drives no longer found attached are
    marked as offline. All offline drives have their SMART availability and
    activation status removed and all attached drives have their SMART
    availability assessed and activated if available.
    :return: serialized models of attached and missing disks via serial num
    """
    # Acquire a list (namedtuple collection) of attached drives > min size
    disks = scan_disks(settings.MIN_DISK_SIZE)
    serial_numbers_seen = []
    # Make sane our db entries in view of what we know we have attached.
    # Device serial number is the only known external unique entry; scan_disks
    # makes this so in the case of empty or repeat entries by providing
    # fake serial numbers which are in turn flagged via WebUI as unreliable.
    # 1) scrub all device names with unique but nonsense uuid4
    # 2) mark all offline disks as such via db flag
    # 3) mark all offline disks smart available and enabled flags as False
    # logger.info('update_disk_state() Called')
    for do in Disk.objects.all():
        # Replace all device names with a unique placeholder on each scan
        # N.B. do not optimize by re-using uuid index as this could lead
        # to a non refreshed webui acting upon an entry that is different
        # from that shown to the user.
        do.name = str(uuid.uuid4()).replace('-', '')  # 32 chars long
        # Delete duplicate or fake by serial number db disk entries.
        # It makes no sense to save fake serial number drives between scans
        # as on each scan the serial number is re-generated anyway.
        if (do.serial in serial_numbers_seen) or (len(do.serial) == 48):
            logger.info('Deleting duplicate or fake (by serial) Disk db '
                        'entry. Serial = %s' % do.serial)
            do.delete()  # django >= 1.9 returns a dict of deleted items.
            # Continue onto next db disk object as nothing more to process.
            continue
        # first encounter of this serial in the db so stash it for reference
        serial_numbers_seen.append(deepcopy(do.serial))
        # Look for devices (by serial number) that are in the db but not in
        # our disk scan, ie offline / missing.
        if (do.serial not in [d.serial for d in disks]):
            # update the db entry as offline
            do.offline = True
            # disable S.M.A.R.T available and enabled flags.
            do.smart_available = do.smart_enabled = False
        do.save()  # make sure all updates are flushed to db
    # Our db now has no device name info as all dev names are place holders.
    # Iterate over attached drives to update the db's knowledge of them.
    # Kernel dev names are unique so safe to overwrite our db unique name.
    for d in disks:
        # start with an empty disk object
        dob = None
        # If the db has an entry with this disk's serial number then
        # use this db entry and update the device name from our recent scan.
        if (Disk.objects.filter(serial=d.serial).exists()):
            dob = Disk.objects.get(serial=d.serial)
            dob.name = d.name
        else:
            # We have an assumed new disk entry as no serial match in db.
            # Build a new entry for this disk.
            dob = Disk(name=d.name, serial=d.serial)
        # Update the db disk object (existing or new) with our scanned info
        dob.size = d.size
        dob.parted = d.parted
        dob.offline = False  # as we are iterating over attached devices
        dob.model = d.model
        dob.transport = d.transport
        dob.vendor = d.vendor
        dob.btrfs_uuid = d.btrfs_uuid
        # If attached disk has an fs and it isn't btrfs
        if (d.fstype is not None and d.fstype != 'btrfs'):
            dob.btrfs_uuid = None
            dob.parted = True  # overload use of parted as non btrfs flag.
            # N.B. this overload use may become redundant with the addition
            # of the Disk.role field.
        # Update the role field with scan_disks findings, currently only
        # mdraid membership type based on fstype info. In the case of
        # these raid member indicators from scan_disks() we have the
        # current truth provided so update the db role status accordingly.
        # N.B. this if else could be expanded to accommodate other
        # roles based on the fs found.
        if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
            # We have an indicator of mdraid membership so update existing
            # role info if any.
            # N.B. We have a minor legacy issue in that prior to using json
            # format for the db role field we stored one of 2 strings.
            # If these 2 strings are found then ignore them as we then
            # overwrite with our current finding and in the new json format.
            # I.e. non None could also be a legacy entry so follow overwrite
            # path when legacy entry found by treating it as a None entry.
            # todo - When we reset migrations the following need only check
            # todo - "dob.role is not None"
            if dob.role is not None and dob.role != 'isw_raid_member' \
                    and dob.role != 'linux_raid_member':
                # get our known roles into a dictionary
                known_roles = json.loads(dob.role)
                # create or update an mdraid dictionary entry
                known_roles['mdraid'] = str(d.fstype)
                # return updated dict to json format and store in db object
                dob.role = json.dumps(known_roles)
            else:
                # We have a dob.role = None so just insert our new role.
                # Also applies to legacy pre json role entries.
                dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
        else:
            # We know this disk is not an mdraid raid member.
            # No identified role from scan_disks() fstype value (mdraid
            # only for now) so we preserve any prior known roles not
            # exposed by scan_disks but remove the mdraid role if found.
            # todo - When we reset migrations the following need only check
            # todo - "dob.role is not None"
            if dob.role is not None and dob.role != 'isw_raid_member' \
                    and dob.role != 'linux_raid_member':
                # remove mdraid role if found but preserve prior roles
                # which should now only be in json format
                known_roles = json.loads(dob.role)
                if 'mdraid' in known_roles:
                    if len(known_roles) > 1:
                        # mdraid is not the only entry so we have to pull
                        # out only mdraid from dict and convert back to json
                        del known_roles['mdraid']
                        dob.role = json.dumps(known_roles)
                    else:
                        # mdraid was the only entry so we need not bother
                        # with dict edit and json conversion only to end up
                        # with an empty json {} so revert to default 'None'.
                        dob.role = None
            else:
                # Empty or legacy role entry.
                # We have either None or a legacy mdraid role when this disk
                # is no longer an mdraid member. We can now assert None.
                dob.role = None
        # If our existing Pool db knows of this disk's pool via its label:
        if (Pool.objects.filter(name=d.label).exists()):
            # update the disk db object's pool field accordingly.
            dob.pool = Pool.objects.get(name=d.label)
            # This is for backwards compatibility. Root pools created
            # before the pool.role migration need this. It can safely be
            # removed a few versions after 3.8-11 or when we reset
            # migrations.
            if (d.root is True):
                dob.pool.role = 'root'
                dob.pool.save()
        else:
            # this disk is not known to exist in any pool via its label
            dob.pool = None
        # If no pool has yet been found with this disk's label in, and
        # the attached disk is our root disk (flagged by scan_disks)
        if (dob.pool is None and d.root is True):
            # setup our special root disk db entry in Pool
            # @todo: dynamically retrieve raid level.
            p = Pool(name=d.label, raid='single', role='root')
            p.disk_set.add(dob)
            p.save()
            # update disk db object to reflect special root pool status
            dob.pool = p
            dob.save()
            p.size = pool_usage(mount_root(p))[0]
            enable_quota(p)
            p.uuid = btrfs_uuid(dob.name)
            p.save()
        # save our updated db disk object
        dob.save()
    # Update online db entries with S.M.A.R.T availability and status.
    for do in Disk.objects.all():
        # find all the not offline db entries
        if (not do.offline):
            # We have an attached disk db entry
            if (re.match('vd|md|mmcblk', do.name) is not None):
                # Virtio disks (named vd*), md devices (named md*), and
                # an sdcard reader that provides devs named mmcblk* have
                # no smart capability so avoid cluttering logs with
                # exceptions on these types of devices.
                do.smart_available = do.smart_enabled = False
                continue
            # try to establish smart availability and status and update db
            try:
                # for non ata/sata drives
                do.smart_available, do.smart_enabled = smart.available(
                    do.name, do.smart_options)
            except Exception, e:
                logger.exception(e)
                do.smart_available = do.smart_enabled = False
        do.save()
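# An isolated sketch (assumed helper, not part of the original module) of the
# Disk.role json handling above: add or remove the 'mdraid' key while
# preserving any other roles, and treat the two legacy plain-string values as
# if the field were empty. update_mdraid_role() is a hypothetical name used
# only for illustration.
import json

LEGACY_ROLES = ('isw_raid_member', 'linux_raid_member')


def update_mdraid_role(current_role, fstype):
    """Return the new role json string (or None) given a scanned fstype."""
    is_member = fstype in ('isw_raid_member', 'linux_raid_member')
    if current_role is None or current_role in LEGACY_ROLES:
        # empty or legacy entry: either start a fresh json role or clear it
        return json.dumps({'mdraid': fstype}) if is_member else None
    known_roles = json.loads(current_role)
    if is_member:
        # create or update the mdraid entry alongside any other known roles
        known_roles['mdraid'] = fstype
    else:
        # no longer an mdraid member; drop the entry but keep other roles
        known_roles.pop('mdraid', None)
    return json.dumps(known_roles) if known_roles else None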