Example #1
0
def get_pool_info(disk):
    """
    Extracts any pool information by running btrfs fi show <disk> and collates
    the results by 'Label', 'uuid', and the device names found. Each device
    name is then translated to the by-id type found in /dev/disk/by-id so that
    its counterparts in the db's Disk.name field can be found.
    N.B. devices without serial may have no by-id counterpart.
    Used by CommandView()._refresh_pool_state() and
    DiskDetailView()._btrfs_disk_import
    :param disk: by-id disk name without path
    :return: a dictionary with keys of 'disks', 'label', and 'uuid';
    'disks' holds a list of devices, while 'label' and 'uuid' hold strings.
    """
    cmd = [BTRFS, 'fi', 'show', '/dev/disk/by-id/%s' % disk]
    o, e, rc = run_command(cmd)
    pool_info = {'disks': [],}
    for l in o:
        if (re.match('Label', l) is not None):
            fields = l.split()
            pool_info['label'] = fields[1].strip("'")
            pool_info['uuid'] = fields[3]
        elif (re.match('\tdevid', l) is not None):
            # We have a line starting with <tab>devid, extract the dev name.
            # Previously this would have been sda and used as is but we need
            # its by-id reference as that is the new format for Disk.name.
            # Original sda extraction:
            # pool_info['disks'].append(l.split()[-1].split('/')[-1])
            # Updated '/dev/sda' extraction to save on a split we no longer need
            # and use this 'now' name to get our by-id name with path removed.
            # This is required as that is how device names are stored in the
            # db Disk.name so that we can locate a drive and update its pool
            # field reference.
            dev_byid, is_byid = get_dev_byid_name(l.split()[-1], True)
            pool_info['disks'].append(dev_byid)
    return pool_info
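
For orientation, a minimal self-contained sketch of the parse performed above follows; the sample 'btrfs fi show' output (label, uuid and device paths) is illustrative only, and get_dev_byid_name() is deliberately not called so the snippet runs in isolation.

import re

SAMPLE_FI_SHOW = [
    "Label: 'rock-pool'  uuid: 429827fc-5ca9-4ca8-b152-f28d8a9d2737",
    "\tTotal devices 2 FS bytes used 2.63GiB",
    "\tdevid    1 size 8.00GiB used 3.03GiB path /dev/sdb",
    "\tdevid    2 size 8.00GiB used 3.03GiB path /dev/sdc",
    "",
]


def sketch_parse(output_lines):
    """Parse 'btrfs fi show' lines the same way get_pool_info() does."""
    info = {'disks': []}
    for line in output_lines:
        if re.match('Label', line) is not None:
            fields = line.split()
            info['label'] = fields[1].strip("'")
            info['uuid'] = fields[3]
        elif re.match('\tdevid', line) is not None:
            # get_pool_info() passes this path to get_dev_byid_name();
            # here the raw path is kept so the sketch has no dependencies.
            info['disks'].append(line.split()[-1])
    return info


# sketch_parse(SAMPLE_FI_SHOW) returns:
# {'disks': ['/dev/sdb', '/dev/sdc'],
#  'label': 'rock-pool',
#  'uuid': '429827fc-5ca9-4ca8-b152-f28d8a9d2737'}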
Example #2
0
 def test_get_dev_byid_name_no_devlinks(self):
     """
     Test the as-yet-unobserved circumstance of no DEVLINKS entry for
     get_dev_byid_name(); exercises the debug log of same.
     """
     dev_name = '/dev/arbitrary'
     remove_path = True
     o = ['']  # no entries of any kind
     e = ['']
     r = 0
     expected = ('arbitrary', False)
     self.mock_run_command.return_value = (o, e, r)
     returned = get_dev_byid_name(dev_name, remove_path)
     self.assertEqual(returned, expected,
                      msg='Un-expected get_dev_byid_name() result:\n '
                          'returned = ({}).\n '
                          'expected = ({}).'.format(returned, expected))
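
The self.mock_run_command attribute exercised above is assumed to come from a setUp() along the following lines; the patch target 'system.osi.run_command' is an assumption about where run_command() is looked up by the module under test.

import unittest

from mock import patch  # on Python 3: from unittest.mock import patch


class OSITests(unittest.TestCase):
    """Sketch of the fixture assumed by the test methods above."""

    def setUp(self):
        # The patch target is an assumption: it must name run_command as
        # imported by the module that defines get_dev_byid_name().
        self.patch_run_command = patch('system.osi.run_command')
        self.mock_run_command = self.patch_run_command.start()

    def tearDown(self):
        patch.stopall()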
Example #3
0
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' %
                          p.name)
             continue
         try:
             # Get and save what info we can prior to mount.
             first_attached_dev = p.disk_set.attached().first()
             is_root_pool = (p.role == 'root')
             # Observe any redirect role by using target_name.
             byid_disk_name, is_byid = get_dev_byid_name(
                 get_device_path(first_attached_dev.target_name))
             if is_byid:
                 pool_info = get_pool_info(first_attached_dev.target_name,
                                           is_root_pool)
                 pool_name = pool_info['label']
             else:
                 logger.error('Skipping pool ({}) mount as attached disk '
                              '({}) has no by-id name (no serial # ?)'.
                              format(p.name,
                                     first_attached_dev.target_name))
                 continue
             p.name = pool_name
             p.save()
             mount_root(p)
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #4
0
def get_pool_info(disk):
    """
    Extracts any pool information by running btrfs fi show <disk> and collates
    the results by 'Label', 'uuid', and a list of disk names. The disk names
    found are translated to the by-id type (/dev/disk/by-id) so that their
    counterparts in the db's Disk.name field can be found.
    N.B. devices without serial may have no by-id counterpart.
    Used by CommandView()._refresh_pool_state() and
    DiskDetailView()._btrfs_disk_import
    :param disk: by-id disk name without path
    :return: a dictionary with keys of 'disks', 'label', and 'uuid';
    'disks' holds a list of devices, while 'label' and 'uuid' hold strings.
    """
    dpath = '/dev/disk/by-id/%s' % disk
    cmd = [BTRFS, 'fi', 'show', dpath]
    o, e, rc = run_command(cmd)
    pool_info = {
        'disks': [],
    }
    for l in o:
        if (re.match('Label', l) is not None):
            fields = l.split()
            pool_info['uuid'] = fields[3]
            label = fields[1].strip("'")
            if label == 'none':
                # fs has no label, set label = uuid.
                label = pool_info['uuid']
                run_command([BTRFS, 'fi', 'label', dpath, label])
            pool_info['label'] = label
        elif (re.match('\tdevid', l) is not None):
            # We have a line starting with <tab>devid, extract the dev name.
            # Previously this would have been sda and used as is but we need
            # its by-id reference as that is the new format for Disk.name.
            # Original sda extraction:
            # pool_info['disks'].append(l.split()[-1].split('/')[-1])
            # Updated '/dev/sda' extraction to save on a split we no longer
            # need and use this 'now' name to get our by-id name with path
            # removed. This is required as that is how device names are stored
            # in the db Disk.name so that we can locate a drive and update its
            # pool field reference.
            dev_byid, is_byid = get_dev_byid_name(l.split()[-1], True)
            pool_info['disks'].append(dev_byid)
    return pool_info
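
Relative to the earlier get_pool_info() above, the new behaviour is the handling of an unlabelled filesystem, which 'btrfs fi show' reports as a literal none. A minimal sketch of just that branch follows; the uuid is an illustrative value and the relabel command is noted only in a comment.

# Sketch of the "Label: none" branch above; the uuid is illustrative.
line = 'Label: none  uuid: 429827fc-5ca9-4ca8-b152-f28d8a9d2737'
fields = line.split()
uuid = fields[3]
label = fields[1].strip("'")  # 'none': unlabelled filesystems have no quotes
if label == 'none':
    label = uuid
    # get_pool_info() then persists this new label via:
    # run_command([BTRFS, 'fi', 'label', dpath, label])
# label == '429827fc-5ca9-4ca8-b152-f28d8a9d2737'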
Example #5
0
 def test_get_dev_byid_name_node_not_found(self):
     """
     Test get_dev_byid_name() when the supplied dev name is not found.
     This could happen if a device was live unplugged and udev removed its
     /dev entries just prior to get_dev_byid_name()'s execution.
     Exercises error log of this event.
     """
     dev_name = '/dev/bogus'
     remove_path = True
     o = ['']
     e = ['device node not found', '']
     r = 2
     expected = ('bogus', False)
     self.mock_run_command.return_value = (o, e, r)
     returned = get_dev_byid_name(dev_name, remove_path)
     self.assertEqual(returned, expected,
                      msg='Un-expected get_dev_byid_name() result:\n '
                          'returned = ({}).\n '
                          'expected = ({}).'.format(returned, expected))
Example #6
0
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' % p.name)
             continue
         try:
             # Get and save what info we can prior to mount.
             first_attached_dev = p.disk_set.attached().first()
             is_root_pool = (p.role == 'root')
             # Observe any redirect role by using target_name.
             byid_disk_name, is_byid = get_dev_byid_name(
                 get_device_path(first_attached_dev.target_name))
             if is_byid:
                 pool_info = get_pool_info(first_attached_dev.target_name,
                                           is_root_pool)
                 pool_name = pool_info['label']
             else:
                 logger.error(
                     'Skipping pool ({}) mount as attached disk '
                     '({}) has no by-id name (no serial # ?)'.format(
                         p.name, first_attached_dev.target_name))
                 continue
             p.name = pool_name
             p.save()
             mount_root(p)
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
Example #7
0
def cur_devices(mnt_pt):
    """
    When given a btrfs mount point, a list containing the full path of all
    devices is generated by wrapping the btrfs fi show <mnt_pt> command and
    parsing the devid lines.
    Used by resize_pool() to ascertain the membership status of a device in a
    pool.
    :param mnt_pt: btrfs mount point
    :return: list containing the current reported devices associated with a
    btrfs mount point in by-id (with full path) format.
    """
    dev_list_byid = []
    o, e, rc = run_command([BTRFS, 'fi', 'show', mnt_pt])
    for l in o:
        l = l.strip()
        if (re.match('devid ', l) is not None):
            # The following extracts the devices from the above command output,
            # ie /dev/sda type names, but these are transient and we use their
            # by-id type counterparts in the db and our logging hence the
            # call to convert the 'now' names to by-id type names.
            # N.B. As opposed to get_pool_info we want to preserve the path as
            # our caller expects this full path format.
            dev_byid, is_byid = get_dev_byid_name(l.split()[-1])
            dev_list_byid.append(dev_byid)
    return dev_list_byid
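
As the docstring notes, resize_pool() uses this list as a membership test. A hedged usage sketch follows; the mount point and by-id path are illustrative values borrowed from the fixtures later in this collection.

# Illustrative only: mount point and candidate path are example values.
mnt_pt = '/mnt2/rock-pool'
candidate = '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00007'
if candidate in cur_devices(mnt_pt):
    # Device is already reported by 'btrfs fi show' for this pool.
    pass
else:
    # Device is not currently a member of the mounted pool.
    pass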
Example #8
0
def cur_devices(mnt_pt):
    """
    When given a btrfs mount point, a list containing the full path of all
    devices is generated by wrapping the btrfs fi show <mnt_pt> command and
    parsing the devid lines.
    Used by resize_pool() to ascertain the membership status of a device in a
    pool.
    :param mnt_pt: btrfs mount point
    :return: list containing the current reported devices associated with a
    btrfs mount point in by-id (with full path) format.
    """
    dev_list_byid = []
    o, e, rc = run_command([BTRFS, 'fi', 'show', mnt_pt])
    for l in o:
        l = l.strip()
        if (re.match('devid ', l) is not None):
            # The following extracts the devices from the above command output,
            # ie /dev/sda type names, but these are transient and we use their
            # by-id type counterparts in the db and our logging hence the
            # call to convert the 'now' names to by-id type names.
            # N.B. As opposed to get_pool_info we want to preserve the path as
            # our caller expects this full path format.
            dev_byid, is_byid = get_dev_byid_name(l.split()[-1])
            dev_list_byid.append(dev_byid)
    return dev_list_byid
Example #9
0
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is the only known external unique entry;
        # scan_disks makes this so in the case of empty or repeat entries by
        # providing fake serial numbers which are in turn flagged via the
        # WebUI as unreliable.
        # 1) Scrub all device names with unique but nonsense uuid4.
        # 2) Mark all offline disks as such via db flag.
        # 3) Mark all offline disks smart available and enabled flags as False.
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (
                    re.match('fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # if these 2 strings are found then ignore them as we then
                # overwrite with our current finding and in the new json format.
                # I.e. non None could also be a legacy entry so follow overwrite
                # path when legacy entry found by treating as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now) so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via its label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # This is for backwards compatibility. Root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset
                # migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label, and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
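
The role-field logic above amounts to maintaining an 'mdraid' key inside a small json dictionary while leaving any other keys alone. Below is a self-contained sketch of that bookkeeping, decoupled from the db objects; role_json stands in for dob.role and the function name is purely illustrative.

import json


def sketch_update_mdraid_role(role_json, fstype):
    """Mirror the mdraid role bookkeeping above, detached from the db."""
    # Treat the two legacy plain-string entries as if no role was recorded.
    if role_json in (None, 'isw_raid_member', 'linux_raid_member'):
        known_roles = {}
    else:
        known_roles = json.loads(role_json)
    if fstype in ('isw_raid_member', 'linux_raid_member'):
        known_roles['mdraid'] = fstype   # assert current mdraid membership
    else:
        known_roles.pop('mdraid', None)  # drop stale membership, keep rest
    return json.dumps(known_roles) if known_roles else None


# sketch_update_mdraid_role(None, 'linux_raid_member')
#   -> '{"mdraid": "linux_raid_member"}'
# sketch_update_mdraid_role('{"mdraid": "isw_raid_member"}', 'btrfs')
#   -> None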
Example #10
0
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is the only known external unique entry;
        # scan_disks makes this so in the case of empty or repeat entries by
        # providing fake serial numbers which are flagged via the WebUI as
        # unreliable.
        # 1) Scrub all device names with unique but nonsense uuid4.
        # 2) Mark all offline disks as such via db flag.
        # 3) Mark all offline disks smart available and enabled flags as False.
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (re.match(
                    'fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for
            # reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info: all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # an empty dictionary of non scan_disk() roles
            non_scan_disks_roles = {}
            # and an empty dictionary of discovered roles
            disk_roles_identified = {}
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our new scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.  N.B. we may want to force a
                # fake-serial here if is_byid False, that way we flag as
                # unusable disk as no by-id type name found.  It may already
                # have been set though as the only by-id failures so far are
                # virtio disks with no serial so scan_disks will have already
                # given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial, role=None)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            # N.B. The Disk.btrfs_uuid is in some senses becoming misleading
            # as we begin to deal with Disk.role managed drives such as mdraid
            # members and full disk LUKS drives where we can make use of the
            # non btrfs uuids to track filesystems or LUKS containers.
            # Leaving as is for now to avoid db changes.
            dob.btrfs_uuid = d.uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                # blank any btrfs_uuid it may have had previously.
                dob.btrfs_uuid = None
            # ### BEGINNING OF ROLE FIELD UPDATE ###
            # Update the role field with scan_disks findings.
            # SCAN_DISKS_KNOWN_ROLES a list of scan_disks identifiable roles.
            # Deal with legacy non json role field contents by erasure.
            # N.B. We have a minor legacy issue in that prior to using json
            # format for the db role field we stored one of 2 strings.
            # If either of these 2 strings is found, reset to the db default
            # of None.
            if dob.role == 'isw_raid_member'\
                    or dob.role == 'linux_raid_member':
                # These are the only legacy non json formatted roles used.
                # Erase legacy role entries as we are about to update the role
                # anyway and new entries will then be in the new json format.
                # This helps to keeps the following role logic cleaner and
                # existing mdraid members will be re-assigned if appropriate
                # using the new json format.
                dob.role = None
            # First extract all non scan_disks assigned roles so we can add
            # them back later; all scan_disks assigned roles will be identified
            # from our recent scan_disks data so we assert the new truth.
            if dob.role is not None:  # db default null=True so None here.
                # Get our previous roles into a dictionary
                previous_roles = json.loads(dob.role)
                # Preserve non scan_disks identified roles for this db entry
                non_scan_disks_roles = {
                    role: v
                    for role, v in previous_roles.items()
                    if role not in SCAN_DISKS_KNOWN_ROLES
                }
            if d.fstype == 'isw_raid_member' \
                    or d.fstype == 'linux_raid_member':
                # MDRAID MEMBER: scan_disks() can inform us of the truth
                # regarding mdraid membership via d.fstype indicators.
                # create or update an mdraid dictionary entry
                disk_roles_identified['mdraid'] = str(d.fstype)
            if d.fstype == 'crypto_LUKS':
                # LUKS FULL DISK: scan_disks() can inform us of the truth
                # regarding full disk LUKS containers which on creation have a
                # unique uuid. Stash this uuid so we might later work out our
                # container mapping.
                disk_roles_identified['LUKS'] = str(d.uuid)
            if d.type == 'crypt':
                # OPEN LUKS DISK: scan_disks() can inform us of the truth
                # regarding an opened LUKS container which appears as a mapped
                # device. Assign the /dev/disk/by-id name as a value.
                disk_roles_identified['openLUKS'] = 'dm-name-%s' % d.name
            if d.fstype == 'bcache':
                # BCACHE: scan_disks() can inform us of the truth regarding
                # bcache "backing devices" so we assign a role to avoid these
                # devices being seen as unused and accidentally deleted. Once
                # formatted with make-bcache -B they are accessed via a virtual
                # device which should end up with a serial of bcache-(d.uuid)
                # here we tag our backing device with its virtual counterpart's
                # serial number.
                disk_roles_identified['bcache'] = 'bcache-%s' % d.uuid
            if d.fstype == 'bcache-cdev':
                # BCACHE: continued; here we use the scan_disks() added info
                # of this bcache device being a cache device not a backing
                # device, so it will have no virtual block device counterpart
                # but likewise must be specifically attributed (ie to fast
                # ssd type drives) so we flag in the role system differently.
                disk_roles_identified['bcachecdev'] = 'bcache-%s' % d.uuid
            if d.root is True:
                # ROOT DISK: scan_disks() has already identified the current
                # truth regarding the device hosting our root '/' fs so update
                # our role accordingly.
                # N.B. value of d.fstype here is essentially a place holder as
                # the presence or otherwise of the 'root' key is all we need.
                disk_roles_identified['root'] = str(d.fstype)
            if d.partitions != {}:
                # PARTITIONS: scan_disks() has built an updated partitions dict
                # so create a partitions role containing this dictionary.
                # Convert scan_disks() transient (but just scanned so current)
                # sda type names to a more useful by-id type name as found
                # in /dev/disk/by-id for each partition name.
                byid_partitions = {
                    get_dev_byid_name(part, True)[0]:
                    d.partitions.get(part, "")
                    for part in d.partitions
                }
                # In the above we fail over to "" on failed index for now.
                disk_roles_identified['partitions'] = byid_partitions
            # Now we join the previous non scan_disks identified roles dict
            # with those we have identified from our fresh scan_disks() data
            # and return the result to our db entry in json format.
            # Note that dict of {} isn't None
            if (non_scan_disks_roles != {}) or (disk_roles_identified != {}):
                combined_roles = dict(non_scan_disks_roles,
                                      **disk_roles_identified)
                dob.role = json.dumps(combined_roles)
            else:
                dob.role = None
            # END OF ROLE FIELD UPDATE
            # If our existing Pool db knows of this disk's pool via its label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # this is for backwards compatibility. root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset
                # migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label, and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.save()
                p.disk_set.add(dob)
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = p.usage_bound()
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial)
                        is not None) or (re.match(
                            'virtio-|md-|mmc-|nvme-|dm-name-luks-|bcache',
                            do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
        ds = DiskInfoSerializer(Disk.objects.all().order_by('name'), many=True)
        return Response(ds.data)
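
The role merge at the end of the role-field update above preserves roles that scan_disks() cannot see while asserting fresh values for those it can. A minimal sketch of that merge follows; SCAN_DISKS_KNOWN_ROLES is assumed to resemble the module-level constant referenced above, and the 'redirect' role value format is illustrative.

import json

# Assumed to resemble the module-level constant referenced above.
SCAN_DISKS_KNOWN_ROLES = ['mdraid', 'root', 'LUKS', 'openLUKS',
                          'bcache', 'bcachecdev', 'partitions']

# Prior db state: a role scan_disks() knows nothing about ('redirect', value
# format illustrative) alongside a now stale mdraid membership entry.
previous_roles = json.loads(
    '{"redirect": "sda3", "mdraid": "linux_raid_member"}')
non_scan_disks_roles = {role: v for role, v in previous_roles.items()
                        if role not in SCAN_DISKS_KNOWN_ROLES}
# Fresh scan_disks() findings for this pass: root disk only, no mdraid.
disk_roles_identified = {'root': 'btrfs'}
combined_roles = dict(non_scan_disks_roles, **disk_roles_identified)
# combined_roles == {'redirect': 'sda3', 'root': 'btrfs'}
role_field = json.dumps(combined_roles) if combined_roles else None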
Example #11
0
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is the only known external unique entry;
        # scan_disks makes this so in the case of empty or repeat entries by
        # providing fake serial numbers which are in turn flagged via the
        # WebUI as unreliable.
        # 1) Scrub all device names with unique but nonsense uuid4.
        # 2) Mark all offline disks as such via db flag.
        # 3) Mark all offline disks smart available and enabled flags as False.
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (re.match(
                    'fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # if these 2 strings are found then ignore them as we then
                # overwrite with our current finding and in the new json format.
                # I.e. non None could also be a legacy entry so follow overwrite
                # path when legacy entry found by treating as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now) so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via its label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # This is for backwards compatibility. Root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset
                # migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label, and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.save()
                p.disk_set.add(dob)
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = p.usage_bound()
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
Example #12
0
 def test_get_dev_byid_name(self):
     """
     Test get_dev_byid_name() across a range of inputs.
     """
     # Note in the first set we have a non DEVLINKS first line and
     # 3 equal length (37 char) names:
     # scsi-1ATA_QEMU_HARDDISK_sys-357-part1
     # scsi-0ATA_QEMU_HARDDISK_sys-357-part1
     # scsi-SATA_QEMU_HARDDISK_sys-357-part1
     # and one shorter, all for the same device.
     # ata-QEMU_HARDDISK_sys-357-part1
     dev_name = ['/dev/sda1']
     remove_path = [True]
     out = [[
         'COMPAT_SYMLINK_GENERATION=2',
         'DEVLINKS=/dev/disk/by-id/ata-QEMU_HARDDISK_sys-357-part1 /dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_sys-357-part1 /dev/disk/by-path/pci-0000:00:06.0-ata-1-part1 /dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_sys-357-part1 /dev/disk/by-uuid/c66d68dd-597e-4525-9eea-3add073378d0 /dev/disk/by-partuuid/8ae50ecc-d866-4187-a4ec-79b096bdf8ed /dev/disk/by-label/system /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_sys-357-part1',  # noqa E501
         'DEVNAME=/dev/sda1',
         'DEVPATH=/devices/pci0000:00/0000:00:06.0/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda1',  # noqa E501
         'DEVTYPE=partition', 'DONT_DEL_PART_NODES=1', 'ID_ATA=1',
         'ID_ATA_FEATURE_SET_SMART=1',
         'ID_ATA_FEATURE_SET_SMART_ENABLED=1', 'ID_ATA_SATA=1',
         'ID_ATA_WRITE_CACHE=1', 'ID_ATA_WRITE_CACHE_ENABLED=1',
         'ID_BTRFS_READY=1', 'ID_BUS=ata', 'ID_FS_LABEL=system',
         'ID_FS_LABEL_ENC=system', 'ID_FS_TYPE=btrfs',
         'ID_FS_USAGE=filesystem',
         'ID_FS_UUID=c66d68dd-597e-4525-9eea-3add073378d0',
         'ID_FS_UUID_ENC=c66d68dd-597e-4525-9eea-3add073378d0',
         'ID_FS_UUID_SUB=76c503a3-3310-45ad-8457-38c35c2cf295',
         'ID_FS_UUID_SUB_ENC=76c503a3-3310-45ad-8457-38c35c2cf295',
         'ID_MODEL=QEMU_HARDDISK',
         'ID_MODEL_ENC=QEMU\\x20HARDDISK\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20',  # noqa E501
         'ID_PART_ENTRY_DISK=8:0', 'ID_PART_ENTRY_FLAGS=0x4',
         'ID_PART_ENTRY_NUMBER=1', 'ID_PART_ENTRY_OFFSET=2048',
         'ID_PART_ENTRY_SCHEME=gpt', 'ID_PART_ENTRY_SIZE=16775135',
         'ID_PART_ENTRY_TYPE=0fc63daf-8483-4772-8e79-3d69d8477de4',
         'ID_PART_ENTRY_UUID=8ae50ecc-d866-4187-a4ec-79b096bdf8ed',
         'ID_PART_TABLE_TYPE=dos',
         'ID_PART_TABLE_UUID=2c013305-39f1-42df-950b-f6953117e09c',
         'ID_PATH=pci-0000:00:06.0-ata-1',
         'ID_PATH_TAG=pci-0000_00_06_0-ata-1', 'ID_REVISION=2.5+',
         'ID_SCSI=1', 'ID_SCSI_INQUIRY=1',
         'ID_SERIAL=QEMU_HARDDISK_sys-357', 'ID_SERIAL_SHORT=sys-357',
         'ID_TYPE=disk', 'ID_VENDOR=ATA',
         'ID_VENDOR_ENC=ATA\\x20\\x20\\x20\\x20\\x20', 'MAJOR=8',
         'MINOR=1', 'PARTN=1',
         'SCSI_IDENT_LUN_ATA=QEMU_HARDDISK_sys-357',
         'SCSI_IDENT_LUN_T10=ATA_QEMU_HARDDISK_sys-357',
         'SCSI_IDENT_LUN_VENDOR=sys-357', 'SCSI_IDENT_SERIAL=sys-357',
         'SCSI_MODEL=QEMU_HARDDISK',
         'SCSI_MODEL_ENC=QEMU\\x20HARDDISK\\x20\\x20\\x20',
         'SCSI_REVISION=2.5+', 'SCSI_TPGS=0', 'SCSI_TYPE=disk',
         'SCSI_VENDOR=ATA',
         'SCSI_VENDOR_ENC=ATA\\x20\\x20\\x20\\x20\\x20',
         'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=3052289',
         '']]
     err = [['']]
     rc = [0]
     # Expected return is always a tuple ('name-string', is_byid_boolean)
     expected_result = [('scsi-SATA_QEMU_HARDDISK_sys-357-part1', True)]
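     # Of the several by-id links above, the expected pick is one of the
     # longest (37 char) scsi-* names; the exact preference ordering is
     # internal to get_dev_byid_name().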
     # Regular data pool disk member (whole disk).
     dev_name.append('sdb')
     remove_path.append(True)
     out.append([
         'COMPAT_SYMLINK_GENERATION=2',
         'DEVLINKS=/dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_QM00007 /dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_QM00007 /dev/disk/by-id/ata-QEMU_HARDDISK_QM00007 /dev/disk/by-label/rock-pool /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00007 /dev/disk/by-path/pci-0000:00:06.0-ata-2 /dev/disk/by-uuid/429827fc-5ca9-4ca8-b152-f28d8a9d2737',  # noqa E501
         'DEVNAME=/dev/sdb',
         'DEVPATH=/devices/pci0000:00/0000:00:06.0/ata2/host1/target1:0:0/1:0:0:0/block/sdb',  # noqa E501
         'DEVTYPE=disk', 'DONT_DEL_PART_NODES=1', 'ID_ATA=1',
         'ID_ATA_FEATURE_SET_SMART=1',
         'ID_ATA_FEATURE_SET_SMART_ENABLED=1', 'ID_ATA_SATA=1',
         'ID_ATA_WRITE_CACHE=1', 'ID_ATA_WRITE_CACHE_ENABLED=1',
         'ID_BTRFS_READY=1', 'ID_BUS=ata', 'ID_FS_LABEL=rock-pool',
         'ID_FS_LABEL_ENC=rock-pool', 'ID_FS_TYPE=btrfs',
         'ID_FS_USAGE=filesystem',
         'ID_FS_UUID=429827fc-5ca9-4ca8-b152-f28d8a9d2737',
         'ID_FS_UUID_ENC=429827fc-5ca9-4ca8-b152-f28d8a9d2737',
         'ID_FS_UUID_SUB=0c17e54b-09e9-4074-9577-c26c9af499a1',
         'ID_FS_UUID_SUB_ENC=0c17e54b-09e9-4074-9577-c26c9af499a1',
         'ID_MODEL=QEMU_HARDDISK',
         'ID_MODEL_ENC=QEMU\\x20HARDDISK\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20',  # noqa E501
         'ID_PATH=pci-0000:00:06.0-ata-2',
         'ID_PATH_TAG=pci-0000_00_06_0-ata-2', 'ID_REVISION=2.5+',
         'ID_SCSI=1', 'ID_SCSI_INQUIRY=1',
         'ID_SERIAL=QEMU_HARDDISK_QM00007',
         'ID_SERIAL_SHORT=QM00007',
         'ID_TYPE=disk', 'ID_VENDOR=ATA',
         'ID_VENDOR_ENC=ATA\\x20\\x20\\x20\\x20\\x20', 'MAJOR=8',
         'MINOR=16', 'MPATH_SBIN_PATH=/sbin',
         'SCSI_IDENT_LUN_ATA=QEMU_HARDDISK_QM00007',
         'SCSI_IDENT_LUN_T10=ATA_QEMU_HARDDISK_QM00007',
         'SCSI_IDENT_LUN_VENDOR=QM00007',
         'SCSI_IDENT_SERIAL=QM00007',
         'SCSI_MODEL=QEMU_HARDDISK',
         'SCSI_MODEL_ENC=QEMU\\x20HARDDISK\\x20\\x20\\x20',
         'SCSI_REVISION=2.5+', 'SCSI_TPGS=0', 'SCSI_TYPE=disk',
         'SCSI_VENDOR=ATA',
         'SCSI_VENDOR_ENC=ATA\\x20\\x20\\x20\\x20\\x20',
         'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=3063907',
         ''])
     err.append([''])
     rc.append(0)
     expected_result.append(('scsi-SATA_QEMU_HARDDISK_QM00007', True))
     # Typical call as made when resizing / changing the raid level of a pool.
     dev_name.append('/dev/sdc')
     remove_path.append(False)
     out.append([
         'COMPAT_SYMLINK_GENERATION=2',
         'DEVLINKS=/dev/disk/by-label/rock-pool /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00009 /dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_QM00009 /dev/disk/by-path/pci-0000:00:06.0-ata-3 /dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_QM00009 /dev/disk/by-uuid/429827fc-5ca9-4ca8-b152-f28d8a9d2737 /dev/disk/by-id/ata-QEMU_HARDDISK_QM00009',  # noqa E501
         'DEVNAME=/dev/sdc',
         'DEVPATH=/devices/pci0000:00/0000:00:06.0/ata3/host2/target2:0:0/2:0:0:0/block/sdc',  # noqa E501
         'DEVTYPE=disk', 'DONT_DEL_PART_NODES=1', 'ID_ATA=1',
         'ID_ATA_FEATURE_SET_SMART=1',
         'ID_ATA_FEATURE_SET_SMART_ENABLED=1', 'ID_ATA_SATA=1',
         'ID_ATA_WRITE_CACHE=1', 'ID_ATA_WRITE_CACHE_ENABLED=1',
         'ID_BTRFS_READY=1', 'ID_BUS=ata', 'ID_FS_LABEL=rock-pool',
         'ID_FS_LABEL_ENC=rock-pool', 'ID_FS_TYPE=btrfs',
         'ID_FS_USAGE=filesystem',
         'ID_FS_UUID=429827fc-5ca9-4ca8-b152-f28d8a9d2737',
         'ID_FS_UUID_ENC=429827fc-5ca9-4ca8-b152-f28d8a9d2737',
         'ID_FS_UUID_SUB=21eade9f-1e18-499f-b506-d0b5b575b240',
         'ID_FS_UUID_SUB_ENC=21eade9f-1e18-499f-b506-d0b5b575b240',
         'ID_MODEL=QEMU_HARDDISK',
         'ID_MODEL_ENC=QEMU\\x20HARDDISK\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20',  # noqa E501
         'ID_PATH=pci-0000:00:06.0-ata-3',
         'ID_PATH_TAG=pci-0000_00_06_0-ata-3', 'ID_REVISION=2.5+',
         'ID_SCSI=1', 'ID_SCSI_INQUIRY=1',
         'ID_SERIAL=QEMU_HARDDISK_QM00009',
         'ID_SERIAL_SHORT=QM00009',
         'ID_TYPE=disk', 'ID_VENDOR=ATA',
         'ID_VENDOR_ENC=ATA\\x20\\x20\\x20\\x20\\x20', 'MAJOR=8',
         'MINOR=32', 'MPATH_SBIN_PATH=/sbin',
         'SCSI_IDENT_LUN_ATA=QEMU_HARDDISK_QM00009',
         'SCSI_IDENT_LUN_T10=ATA_QEMU_HARDDISK_QM00009',
         'SCSI_IDENT_LUN_VENDOR=QM00009',
         'SCSI_IDENT_SERIAL=QM00009',
         'SCSI_MODEL=QEMU_HARDDISK',
         'SCSI_MODEL_ENC=QEMU\\x20HARDDISK\\x20\\x20\\x20',
         'SCSI_REVISION=2.5+', 'SCSI_TPGS=0', 'SCSI_TYPE=disk',
         'SCSI_VENDOR=ATA',
         'SCSI_VENDOR_ENC=ATA\\x20\\x20\\x20\\x20\\x20',
         'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=3054291',
         ''])
     err.append([''])
     rc.append(0)
     expected_result.append(
         ('/dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00009', True))
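     # As remove_path is False here, the full /dev/disk/by-id/... path is
     # expected back, unlike the earlier path-stripped cases.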
     # Query on an open LUKS container (backed by bcache):
     dev_name.append('luks-a47f4950-3296-4504-b9a4-2dc75681a6ad')
     remove_path.append(True)
     out.append([
         'DEVLINKS=/dev/disk/by-id/dm-name-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad /dev/disk/by-id/dm-uuid-CRYPT-LUKS1-a47f495032964504b9a42dc75681a6ad-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad /dev/disk/by-label/luks-pool-on-bcache /dev/disk/by-uuid/8ad02be6-fc5f-4342-bdd2-f992e7792a5b /dev/mapper/luks-a47f4950-3296-4504-b9a4-2dc75681a6ad',  # noqa E501
         'DEVNAME=/dev/dm-0', 'DEVPATH=/devices/virtual/block/dm-0',
         'DEVTYPE=disk', 'DM_ACTIVATION=1',
         'DM_NAME=luks-a47f4950-3296-4504-b9a4-2dc75681a6ad',
         'DM_SUSPENDED=0', 'DM_UDEV_PRIMARY_SOURCE_FLAG=1',
         'DM_UDEV_RULES_VSN=2',
         'DM_UUID=CRYPT-LUKS1-a47f495032964504b9a42dc75681a6ad-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad',  # noqa E501
         'ID_FS_LABEL=luks-pool-on-bcache',
         'ID_FS_LABEL_ENC=luks-pool-on-bcache', 'ID_FS_TYPE=btrfs',
         'ID_FS_USAGE=filesystem',
         'ID_FS_UUID=8ad02be6-fc5f-4342-bdd2-f992e7792a5b',
         'ID_FS_UUID_ENC=8ad02be6-fc5f-4342-bdd2-f992e7792a5b',
         'ID_FS_UUID_SUB=70648d6c-be07-42ee-88ff-0e9c68a5415c',
         'ID_FS_UUID_SUB_ENC=70648d6c-be07-42ee-88ff-0e9c68a5415c',
         'MAJOR=251', 'MINOR=0', 'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=10617229', ''])
     err.append([''])
     rc.append(0)
     expected_result.append(
         ('dm-name-luks-a47f4950-3296-4504-b9a4-2dc75681a6ad', True))
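     # Note the dm-name-* link is expected despite the dm-uuid-* link above
     # being longer, suggesting device-mapper names receive their own
     # handling within get_dev_byid_name().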
     # Query on a bcache backing device; this assumes the udev rules
     # detailed in the forum wiki entry:
     # https://forum.rockstor.com/t/bcache-developers-notes/2762
     dev_name.append('bcache0')
     remove_path.append(True)
     out.append([
         'DEVLINKS=/dev/disk/by-id/bcache-QEMU_HARDDISK-bcache-bdev-1 /dev/disk/by-uuid/3efb3830-fee1-4a9e-a5c6-ea456bfc269e',  # noqa E501
         'DEVNAME=/dev/bcache0', 'DEVPATH=/devices/virtual/block/bcache0',
         'DEVTYPE=disk',
         'ID_BCACHE_BDEV_FS_UUID=c9ed805f-b141-4ce9-80c7-9f9e1f71195d',
         'ID_BCACHE_BDEV_MODEL=QEMU_HARDDISK',
         'ID_BCACHE_BDEV_SERIAL=bcache-bdev-1',
         'ID_BCACHE_CSET_UUID=16657e0a-a7e0-48bc-9a69-433c0f2cd920',
         'ID_FS_TYPE=crypto_LUKS', 'ID_FS_USAGE=crypto',
         'ID_FS_UUID=3efb3830-fee1-4a9e-a5c6-ea456bfc269e',
         'ID_FS_UUID_ENC=3efb3830-fee1-4a9e-a5c6-ea456bfc269e',
         'ID_FS_VERSION=1',
         'ID_SERIAL=bcache-c9ed805f-b141-4ce9-80c7-9f9e1f71195d',
         'MAJOR=252', 'MINOR=0', 'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=76148', ''])
     err.append([''])
     rc.append(0)
     expected_result.append(('bcache-QEMU_HARDDISK-bcache-bdev-1', True))
     # Regular virtio device hosting a LUKS container:
     dev_name.append('vdb')
     remove_path.append(True)
     out.append([
         'DEVLINKS=/dev/disk/by-id/virtio-serial-5 /dev/disk/by-path/virtio-pci-0000:00:0d.0 /dev/disk/by-uuid/41cd2e3c-3bd6-49fc-9f42-20e368a66efc',  # noqa E501
         'DEVNAME=/dev/vdb',
         'DEVPATH=/devices/pci0000:00/0000:00:0d.0/virtio4/block/vdb',
         'DEVTYPE=disk', 'ID_FS_TYPE=crypto_LUKS', 'ID_FS_USAGE=crypto',
         'ID_FS_UUID=41cd2e3c-3bd6-49fc-9f42-20e368a66efc',
         'ID_FS_UUID_ENC=41cd2e3c-3bd6-49fc-9f42-20e368a66efc',
         'ID_FS_VERSION=1', 'ID_PATH=virtio-pci-0000:00:0d.0',
         'ID_PATH_TAG=virtio-pci-0000_00_0d_0', 'ID_SERIAL=serial-5',
         'MAJOR=253', 'MINOR=16', 'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=4469', ''])
     err.append([''])
     rc.append(0)
     expected_result.append(('virtio-serial-5', True))
     # Legacy root drive (with serial "sys-drive-serial-num").
     dev_name.append('sda3')
     remove_path.append(True)
     out.append([
         'DEVLINKS=/dev/disk/by-id/ata-QEMU_HARDDISK_sys-drive-serial-num-part3 /dev/disk/by-label/rockstor_rockstor /dev/disk/by-path/pci-0000:00:05.0-ata-1.0-part3 /dev/disk/by-uuid/a98f88c2-2031-4bd3-9124-2f9d8a77987c',  # noqa E501
         'DEVNAME=/dev/sda3',
         'DEVPATH=/devices/pci0000:00/0000:00:05.0/ata3/host2/target2:0:0/2:0:0:0/block/sda/sda3',  # noqa E501
         'DEVTYPE=partition', 'ID_ATA=1', 'ID_ATA_FEATURE_SET_SMART=1',
         'ID_ATA_FEATURE_SET_SMART_ENABLED=1', 'ID_ATA_SATA=1',
         'ID_ATA_WRITE_CACHE=1', 'ID_ATA_WRITE_CACHE_ENABLED=1',
         'ID_BUS=ata', 'ID_FS_LABEL=rockstor_rockstor',
         'ID_FS_LABEL_ENC=rockstor_rockstor', 'ID_FS_TYPE=btrfs',
         'ID_FS_USAGE=filesystem',
         'ID_FS_UUID=a98f88c2-2031-4bd3-9124-2f9d8a77987c',
         'ID_FS_UUID_ENC=a98f88c2-2031-4bd3-9124-2f9d8a77987c',
         'ID_FS_UUID_SUB=81b9232f-0981-4753-ab0c-1a686b6ad3a9',
         'ID_FS_UUID_SUB_ENC=81b9232f-0981-4753-ab0c-1a686b6ad3a9',
         'ID_MODEL=QEMU_HARDDISK',
         'ID_MODEL_ENC=QEMU\\x20HARDDISK\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20',  # noqa E501
         'ID_PART_ENTRY_DISK=8:0', 'ID_PART_ENTRY_NUMBER=3',
         'ID_PART_ENTRY_OFFSET=2705408', 'ID_PART_ENTRY_SCHEME=dos',
         'ID_PART_ENTRY_SIZE=14071808', 'ID_PART_ENTRY_TYPE=0x83',
         'ID_PART_TABLE_TYPE=dos', 'ID_PATH=pci-0000:00:05.0-ata-1.0',
         'ID_PATH_TAG=pci-0000_00_05_0-ata-1_0', 'ID_REVISION=2.4.0',
         'ID_SERIAL=QEMU_HARDDISK_sys-drive-serial-num',
         'ID_SERIAL_SHORT=sys-drive-serial-num', 'ID_TYPE=disk', 'MAJOR=8',
         'MINOR=3', 'PARTN=3', 'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=81921', ''])
     err.append([''])
     rc.append(0)
     expected_result.append(
         ('ata-QEMU_HARDDISK_sys-drive-serial-num-part3', True))
     # The above legacy root device, but on a virtio interface with no
     # serial and hence no by-id name.
     dev_name.append('vda3')
     remove_path.append(True)
     out.append([
         'DEVLINKS=/dev/disk/by-label/rockstor_rockstor /dev/disk/by-path/virtio-pci-0000:00:09.0-part3 /dev/disk/by-uuid/a98f88c2-2031-4bd3-9124-2f9d8a77987c',  # noqa E501
         'DEVNAME=/dev/vda3',
         'DEVPATH=/devices/pci0000:00/0000:00:09.0/virtio3/block/vda/vda3',
         'DEVTYPE=partition', 'ID_FS_LABEL=rockstor_rockstor',
         'ID_FS_LABEL_ENC=rockstor_rockstor', 'ID_FS_TYPE=btrfs',
         'ID_FS_USAGE=filesystem',
         'ID_FS_UUID=a98f88c2-2031-4bd3-9124-2f9d8a77987c',
         'ID_FS_UUID_ENC=a98f88c2-2031-4bd3-9124-2f9d8a77987c',
         'ID_FS_UUID_SUB=81b9232f-0981-4753-ab0c-1a686b6ad3a9',
         'ID_FS_UUID_SUB_ENC=81b9232f-0981-4753-ab0c-1a686b6ad3a9',
         'ID_PART_ENTRY_DISK=253:0', 'ID_PART_ENTRY_NUMBER=3',
         'ID_PART_ENTRY_OFFSET=2705408', 'ID_PART_ENTRY_SCHEME=dos',
         'ID_PART_ENTRY_SIZE=14071808', 'ID_PART_ENTRY_TYPE=0x83',
         'ID_PART_TABLE_TYPE=dos', 'ID_PATH=virtio-pci-0000:00:09.0',
         'ID_PATH_TAG=virtio-pci-0000_00_09_0', 'MAJOR=253', 'MINOR=3',
         'PARTN=3', 'SUBSYSTEM=block', 'TAGS=:systemd:',
         'USEC_INITIALIZED=2699', ''])
     err.append([''])
     rc.append(0)
     expected_result.append(('vda3', False))
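     # With no by-id entries in DEVLINKS the canonical name comes back
     # unchanged and is flagged False, i.e. not a by-id name.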
     # Cycle through each of the above parameter / run_command data sets.
     for dev, rp, o, e, r, expected in zip(dev_name, remove_path, out, err,
                                           rc, expected_result):
         self.mock_run_command.return_value = (o, e, r)
         returned = get_dev_byid_name(dev, rp)
         self.assertEqual(returned, expected,
                          msg='Un-expected get_dev_byid_name() result:\n '
                              'returned = ({}).\n '
                              'expected = ({}).'.format(returned, expected))
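The test above is table-driven: the parallel dev_name, remove_path, out, err, rc, and expected_result lists are zipped and each tuple is replayed through the mocked run_command. A further case would be appended alongside the earlier sets, before the zip loop; the device name and udev lines below are hypothetical, not taken from the source:

     # Hypothetical device exposing no by-id links at all, so the plain
     # name should be returned with a False by-id flag.
     dev_name.append('xvdq')
     remove_path.append(True)
     out.append(['DEVNAME=/dev/xvdq', 'DEVTYPE=disk', 'SUBSYSTEM=block', ''])
     err.append([''])
     rc.append(0)
     expected_result.append(('xvdq', False))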
예제 #13
0
 def hdparm_setting(self, *args, **kwargs):
     try:
         return read_hdparm_setting(get_dev_byid_name(self.name))
     except:
         return None
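The bare except above maps any failure, whatever its type, to None. A narrower variant is sketched below purely as an illustration; the listed exception types are assumptions, not the project's code:

 def hdparm_setting(self, *args, **kwargs):
     # Illustrative sketch only: same lookup as above, but the fallback is
     # confined to plausible lookup/read failures instead of a bare except
     # (which also swallows SystemExit and KeyboardInterrupt).
     try:
         return read_hdparm_setting(get_dev_byid_name(self.name))
     except (EnvironmentError, ValueError, TypeError):
         return None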