Example #1
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' %
                          p.name)
             continue
         try:
             mount_root(p)
             first_attached_dev = p.disk_set.attached().first()
             # Observe any redirect role by using target_name.
             pool_info = get_pool_info(first_attached_dev.target_name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #2
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             first_dev = p.disk_set.first()
             first_dev_name = first_dev.name
             # if we are looking at a device with a redirect role then
             # redirect accordingly.
             if first_dev.role is not None:
                 disk_role_dict = json.loads(first_dev.role)
                 if 'redirect' in disk_role_dict:
                     # consider replacing None with first_dev.name
                     first_dev_name = disk_role_dict.get('redirect', None)
             pool_info = get_pool_info(first_dev_name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
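For context on the redirect-role handling above: the role is stored on the disk record as a small JSON object, e.g. {"redirect": "virtio-serial-3-part2"} (that value is taken from the comment in Example #11 below). A minimal standalone sketch of the lookup, using a hypothetical helper name and falling back to the disk's own name as the inline comment in Example #2 suggests:

 import json

 def resolve_dev_name(disk_name, role_json=None):
     """Return the redirect target if a redirect role is stored, else disk_name."""
     if role_json is None:  # db default for Disk.role is null / None
         return disk_name
     roles = json.loads(role_json)
     return roles.get('redirect', disk_name)

 print(resolve_dev_name('virtio-serial-3'))  # virtio-serial-3
 print(resolve_dev_name('virtio-serial-3', '{"redirect": "virtio-serial-3-part2"}'))
 # virtio-serial-3-part2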
Example #3
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.parted = False
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #4
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.parted = False
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #5
 def _refresh_pool_state():
     # Get map of dm-0 to /dev/mapper members ie luks-.. devices.
     mapped_devs = get_device_mapper_map()
     # Get temp_names (kernel names) to btrfs pool info for attached devs.
     dev_pool_info = get_dev_pool_info()
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if p.disk_set.count() == 0:
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error("Skipping Pool ({}) mount as there "
                          "are no attached devices. Moving on.".format(
                              p.name))
             continue
         # If the pool has no missing devices, remove all detached disk pool
         # associations. This accounts for 'end of run' clean-up when removing
         # a detached disk, and for cli maintenance where a pool has returned
         # to a no-missing-device state. It also re-establishes pool info as
         # the source of truth regarding missing devices.
         if not p.has_missing_dev:
             for disk in p.disk_set.filter(name__startswith="detached-"):
                 logger.info(
                     "Removing detached disk from Pool {}: no missing "
                     "devices found.".format(p.name))
                 disk.pool = None
                 disk.save()
         try:
             # Get and save what info we can prior to mount.
             first_dev = p.disk_set.attached().first()
             # Use target_name to account for redirect role.
             if first_dev.target_name == first_dev.temp_name:
                 logger.error(
                     "Skipping pool ({}) mount as attached disk "
                     "({}) has no by-id name (no serial # ?)".format(
                         p.name, first_dev.target_name))
                 continue
             if first_dev.temp_name in mapped_devs:
                 dev_tmp_name = "/dev/mapper/{}".format(
                     mapped_devs[first_dev.temp_name])
             else:
                 dev_tmp_name = "/dev/{}".format(first_dev.temp_name)
             # For now we call get_dev_pool_info() once for each pool.
             pool_info = dev_pool_info[dev_tmp_name]
             p.name = pool_info.label
             p.uuid = pool_info.uuid
             p.save()
             mount_root(p)
             p.raid = pool_raid(p.mnt_pt)["data"]
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error("Exception while refreshing state for "
                          "Pool({}). Moving on: {}".format(
                              p.name, e.__str__()))
             logger.exception(e)
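A minimal standalone sketch of the temp-name to device-path resolution used in Example #5 above (and its near-duplicate, Example #9, below): kernel names present in the device-mapper map (e.g. dm-0 for a luks-* volume) resolve to /dev/mapper paths, everything else falls back to a plain /dev path. The map value below is a made-up placeholder; the real map comes from get_device_mapper_map().

 # assumed shape: kernel name -> /dev/mapper member name
 mapped_devs = {'dm-0': 'luks-<uuid-placeholder>'}

 def dev_path(temp_name, mapped_devs):
     if temp_name in mapped_devs:
         return '/dev/mapper/{}'.format(mapped_devs[temp_name])
     return '/dev/{}'.format(temp_name)

 print(dev_path('dm-0', mapped_devs))  # /dev/mapper/luks-<uuid-placeholder>
 print(dev_path('sda', mapped_devs))   # /dev/sda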
Example #6
 def _refresh_pool_state(self, pool):
     dname = Disk.objects.filter(pool=pool)[0].name
     mount_root(pool, dname)
     pool_info = get_pool_info(dname)
     pool.name = pool_info['label']
     pool.raid = pool_raid('%s%s' % (settings.MNT_PT, pool.name))['data']
     pool.size = pool_usage('%s%s' % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #7
 def _refresh_pool_state(self, pool):
     fd = pool.disk_set.first()
     if fd is None:
         return pool.delete()
     mount_root(pool, fd.name)
     pool_info = get_pool_info(fd.name)
     pool.name = pool_info["label"]
     pool.raid = pool_raid("%s%s" % (settings.MNT_PT, pool.name))["data"]
     pool.size = pool_usage("%s%s" % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #8
 def _refresh_pool_state(self, pool):
     fd = pool.disk_set.first()
     if (fd is None):
         return pool.delete()
     mount_root(pool, fd.name)
     pool_info = get_pool_info(fd.name)
     pool.name = pool_info['label']
     pool.raid = pool_raid('%s%s' % (settings.MNT_PT, pool.name))['data']
     pool.size = pool_usage('%s%s' % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #9
 def _refresh_pool_state():
     # Get map of dm-0 to /dev/mapper members ie luks-.. devices.
     mapped_devs = get_device_mapper_map()
     # Get temp_names (kernel names) to btrfs pool info for attached devs.
     dev_pool_info = get_dev_pool_info()
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if p.disk_set.count() == 0:
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error("Skipping Pool ({}) mount as there "
                          "are no attached devices. Moving on.".format(
                              p.name))
             continue
         try:
             # Get and save what info we can prior to mount.
             first_dev = p.disk_set.attached().first()
             # Use target_name to account for redirect role.
             if first_dev.target_name == first_dev.temp_name:
                 logger.error(
                     "Skipping pool ({}) mount as attached disk "
                     "({}) has no by-id name (no serial # ?)".format(
                         p.name, first_dev.target_name))
                 continue
             if first_dev.temp_name in mapped_devs:
                 dev_tmp_name = "/dev/mapper/{}".format(
                     mapped_devs[first_dev.temp_name])
             else:
                 dev_tmp_name = "/dev/{}".format(first_dev.temp_name)
             # For now we call get_dev_pool_info() once for each pool.
             pool_info = dev_pool_info[dev_tmp_name]
             p.name = pool_info.label
             p.uuid = pool_info.uuid
             p.save()
             mount_root(p)
             p.raid = pool_raid(p.mnt_pt)["data"]
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error("Exception while refreshing state for "
                          "Pool({}). Moving on: {}".format(
                              p.name, e.__str__()))
             logger.exception(e)
Example #10
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             fd = p.disk_set.first()
             pool_info = get_pool_info(fd.name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             p.save()
         except Exception, e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
Example #11
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         disk_name = self._role_filter_disk_name(disk, request)
         p_info = get_pool_info(disk_name)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for device in p_info['disks']:
             disk_name, isPartition = \
                 self._reverse_role_filter_name(device, request)
             do = Disk.objects.get(name=disk_name)
             do.pool = po
             # update this disk's parted property
             do.parted = isPartition
             if isPartition:
                 # ensure a redirect role to reach this partition; ie:
                 # "redirect": "virtio-serial-3-part2"
                 if do.role is not None:  # db default is null / None.
                     # Get our previous roles into a dictionary
                     roles = json.loads(do.role)
                     # update or add our "redirect" role with our part name
                     roles['redirect'] = '%s' % device
                     # convert back to json and store in disk object
                     do.role = json.dumps(roles)
                 else:
                     # role=None so just add a json formatted redirect role
                     do.role = '{"redirect": "%s"}' % device
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = po.usage_bound()
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception as e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #12
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             fd = p.disk_set.first()
             pool_info = get_pool_info(fd.name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
             p.save()
         except Exception, e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #13
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' %
                          p.name)
             continue
         try:
             # Get and save what info we can prior to mount.
             first_attached_dev = p.disk_set.attached().first()
             is_root_pool = (p.role == 'root')
             # Observe any redirect role by using target_name.
             byid_disk_name, is_byid = get_dev_byid_name(
                 get_device_path(first_attached_dev.target_name))
             if is_byid:
                 pool_info = get_pool_info(first_attached_dev.target_name,
                                           is_root_pool)
                 pool_name = pool_info['label']
             else:
                 logger.error('Skipping pool ({}) mount as attached disk '
                              '({}) has no by-id name (no serial # ?)'.
                              format(p.name,
                                     first_attached_dev.target_name))
                 continue
             p.name = pool_name
             p.save()
             mount_root(p)
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #14
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.save()
             mount_root(po, d)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #15
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.save()
             mount_root(po, d)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #16
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' % p.name)
             continue
         try:
             # Get and save what info we can prior to mount.
             first_attached_dev = p.disk_set.attached().first()
             is_root_pool = (p.role == 'root')
             # Observe any redirect role by using target_name.
             byid_disk_name, is_byid = get_dev_byid_name(
                 get_device_path(first_attached_dev.target_name))
             if is_byid:
                 pool_info = get_pool_info(first_attached_dev.target_name,
                                           is_root_pool)
                 pool_name = pool_info['label']
             else:
                 logger.error(
                     'Skipping pool ({}) mount as attached disk '
                     '({}) has no by-id name (no serial # ?)'.format(
                         p.name, first_attached_dev.target_name))
                 continue
             p.name = pool_name
             p.save()
             mount_root(p)
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
Example #17
            try:
                auto_update(enable=False)
                return Response({'enabled': False, })
            except Exception, e:
                msg = ('Failed to disable auto update due to this exception:  '
                       '%s' % e.__str__())
                handle_exception(Exception(msg), request)

        if (command == 'refresh-pool-state'):
            for p in Pool.objects.all():
                fd = p.disk_set.first()
                if (fd is None):
                     p.delete()
                     continue
                mount_root(p)
                pool_info = get_pool_info(fd.name)
                p.name = pool_info['label']
                p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
                p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
                p.save()
            return Response()

        if (command == 'refresh-share-state'):
            for p in Pool.objects.all():
                import_shares(p, request)
            return Response()

        if (command == 'refresh-snapshot-state'):
            for share in Share.objects.all():
                import_snapshots(share)
            return Response()
Example #18
    def test_get_pool_raid_levels_identification(self):
        """Presents the raid identification function with example data and compares
        its return dict to that expected for the given input.  :return: 'ok'
        if all is as expected or a message indicating which raid level was
        incorrectly identified given the test data.  N.B. Only the first raid
        level fail is indicated, however all are expected to pass anyway so we
        will have to tend to each failure in turn until all pass.

        """
        # setup fake mount point
        mount_point = '/mnt2/fake-pool'
        cmd_rc = 0
        cmd_e = ['']
        # setup example btrfs fi df mount_point outputs for given inputs.
        # Outputs are simple lists of whole lines output from btrfs fi df
        single_fi_df = [
            'Data, single: total=8.00MiB, used=64.00KiB',
            'System, single: total=4.00MiB, used=16.00KiB',
            'Metadata, single: total=216.00MiB, used=128.00KiB',
            'GlobalReserve, single: total=16.00MiB, used=0.00B', ''
        ]
        # Expected return is a dict of extracted info from above command
        # output.
        single_return = {
            'data': 'single',
            'system': 'single',
            'globalreserve': 'single',
            'metadata': 'single'
        }
        raid0_fi_df = [
            'Data, RAID0: total=512.00MiB, used=256.00KiB',
            'System, RAID0: total=16.00MiB, used=16.00KiB',
            'Metadata, RAID0: total=512.00MiB, used=128.00KiB',
            'GlobalReserve, single: total=16.00MiB, used=0.00B', ''
        ]
        raid0_return = {
            'data': 'raid0',
            'system': 'raid0',
            'globalreserve': 'single',
            'metadata': 'raid0'
        }
        raid1_fi_df = [
            'Data, RAID1: total=512.00MiB, used=192.00KiB',
            'System, RAID1: total=32.00MiB, used=16.00KiB',
            'Metadata, RAID1: total=256.00MiB, used=128.00KiB',
            'GlobalReserve, single: total=16.00MiB, used=0.00B', ''
        ]
        raid1_return = {
            'data': 'raid1',
            'system': 'raid1',
            'globalreserve': 'single',
            'metadata': 'raid1'
        }
        # Thanks to @grebnek in forum and GitHub for spotting this:
        # https://btrfs.wiki.kernel.org/index.php/FAQ#Why_do_I_have_.22single.22_chunks_in_my_RAID_filesystem.3F
        # When converting from single to another raid level it is normal for
        # a few chunks to remain in single until the next balance operation.
        raid1_fi_df_some_single_chunks = [
            'Data, RAID1: total=416.00MiB, used=128.00KiB',
            'Data, single: total=416.00MiB, used=0.00B',
            'System, RAID1: total=32.00MiB, used=16.00KiB',
            'Metadata, RAID1: total=512.00MiB, used=128.00KiB',
            'GlobalReserve, single: total=16.00MiB, used=0.00B', ''
        ]
        # but the expected result should be the same as "raid1_return" above
        # ie data raid1 not single.
        raid10_fi_df = [
            'Data, RAID10: total=419.75MiB, used=128.00KiB',
            'System, RAID10: total=16.00MiB, used=16.00KiB',
            'Metadata, RAID10: total=419.75MiB, used=128.00KiB',
            'GlobalReserve, single: total=16.00MiB, used=0.00B', ''
        ]
        raid10_return = {
            'data': 'raid10',
            'system': 'raid10',
            'globalreserve': 'single',
            'metadata': 'raid10'
        }
        raid5_fi_df = [
            'Data, RAID5: total=215.00MiB, used=128.00KiB',
            'System, RAID5: total=8.00MiB, used=16.00KiB',
            'Metadata, RAID5: total=215.00MiB, used=128.00KiB',
            'GlobalReserve, single: total=16.00MiB, used=0.00B', ''
        ]
        raid5_return = {
            'data': 'raid5',
            'system': 'raid5',
            'globalreserve': 'single',
            'metadata': 'raid5'
        }
        raid6_fi_df = [
            'Data, RAID6: total=211.62MiB, used=128.00KiB',
            'System, RAID6: total=8.00MiB, used=16.00KiB',
            'Metadata, RAID6: total=211.62MiB, used=128.00KiB',
            'GlobalReserve, single: total=16.00MiB, used=0.00B', ''
        ]
        raid6_return = {
            'data': 'raid6',
            'system': 'raid6',
            'globalreserve': 'single',
            'metadata': 'raid6'
        }
        # Data to test for correct recognition of the default rockstor_rockstor
        # pool ie:
        default_sys_fi_df = [
            'Data, single: total=3.37GiB, used=2.71GiB',
            'System, DUP: total=8.00MiB, used=16.00KiB',
            'System, single: total=4.00MiB, used=0.00B',
            'Metadata, DUP: total=471.50MiB, used=165.80MiB',
            'Metadata, single: total=8.00MiB, used=0.00B',
            ('GlobalReserve, single: total=64.00MiB, '
             'used=0.00B'), ''
        ]
        default_sys_return = {
            'data': 'single',
            'system': 'dup',
            'globalreserve': 'single',
            'metadata': 'dup'
        }
        # list used to report what raid level is currently under test.
        raid_levels_tested = [
            'single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6',
            'raid1_some_single_chunks', 'default_sys_pool'
        ]
        # list of example fi_df outputs in raid_levels_tested order
        btrfs_fi_di = [
            single_fi_df, raid0_fi_df, raid1_fi_df, raid10_fi_df, raid5_fi_df,
            raid6_fi_df, raid1_fi_df_some_single_chunks, default_sys_fi_df
        ]
        # list of correctly parsed return dictionaries
        return_dict = [
            single_return, raid0_return, raid1_return, raid10_return,
            raid5_return, raid6_return, raid1_return, default_sys_return
        ]
        # simple iteration over above example inputs to expected outputs.
        for raid_level, fi_df, expected_result in map(None, raid_levels_tested,
                                                      btrfs_fi_di,
                                                      return_dict):
            # mock example command output with no error and rc=0
            self.mock_run_command.return_value = (fi_df, cmd_e, cmd_rc)
            # assert get_pool_raid_level returns what we expect.
            self.assertEqual(pool_raid(mount_point),
                             expected_result,
                             msg='get_pool_raid_level() misidentified raid '
                             'level %s' % raid_level)
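One parsing rule that is consistent with every expected dictionary above, including the raid1-with-leftover-single-chunks and default system pool cases, is "first block-group line seen for each type wins". The following is only an illustration inferred from the test data, not the project's actual pool_raid() implementation:

 def parse_fi_df(lines):
     """Illustrative parse of `btrfs fi df` output into a raid-levels dict."""
     result = {}
     for line in lines:
         if not line:
             continue
         block_type, level = [f.strip() for f in line.split(':')[0].split(',')]
         # first occurrence wins, so leftover 'single' chunks don't override
         result.setdefault(block_type.lower(), level.lower())
     return result

 print(parse_fi_df([
     'Data, RAID1: total=416.00MiB, used=128.00KiB',
     'Data, single: total=416.00MiB, used=0.00B',
     'System, RAID1: total=32.00MiB, used=16.00KiB',
     'Metadata, RAID1: total=512.00MiB, used=128.00KiB',
     'GlobalReserve, single: total=16.00MiB, used=0.00B', '']))
 # {'data': 'raid1', 'system': 'raid1', 'metadata': 'raid1', 'globalreserve': 'single'}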
Example #19
                return Response({
                    'enabled': False,
                })
            except Exception, e:
                msg = ('Failed to disable auto update due to this exception:  '
                       '%s' % e.__str__())
                handle_exception(Exception(msg), request)

        if (command == 'refresh-pool-state'):
            for p in Pool.objects.all():
                fd = p.disk_set.first()
                if (fd is None):
                     p.delete()
                     continue
                mount_root(p)
                pool_info = get_pool_info(fd.name)
                p.name = pool_info['label']
                p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
                p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
                p.save()
            return Response()

        if (command == 'refresh-share-state'):
            for p in Pool.objects.all():
                import_shares(p, request)
            return Response()

        if (command == 'refresh-snapshot-state'):
            for share in Share.objects.all():
                import_snapshots(share)
            return Response()
Example #20
 def test_get_pool_raid_levels_identification(self):
     """
     Presents the raid identification function with example data and compares
     its return dict to that expected for the given input.
     :return: 'ok' if all is as expected or a message indicating which raid
     level was incorrectly identified given the test data.
     N.B. Only the first raid level fail is indicated, however all are
     expected to pass anyway so we will have to tend to each failure in turn
     until all pass.
     """
     # setup fake mount point
     mount_point = '/mnt2/fake-pool'
     cmd_rc = 0
     cmd_e = ['']
     # setup example btrfs fi df mount_point outputs for given inputs.
     # Outputs are simple lists of whole lines output from btrfs fi df
     single_fi_df = ['Data, single: total=8.00MiB, used=64.00KiB',
                     'System, single: total=4.00MiB, used=16.00KiB',
                     'Metadata, single: total=216.00MiB, used=128.00KiB',
                     'GlobalReserve, single: total=16.00MiB, used=0.00B', '']
     # Expected return is a dict of extracted info from above command output.
     single_return = {'data': 'single', 'system': 'single',
                      'globalreserve': 'single', 'metadata': 'single'}
     raid0_fi_df = ['Data, RAID0: total=512.00MiB, used=256.00KiB',
                    'System, RAID0: total=16.00MiB, used=16.00KiB',
                    'Metadata, RAID0: total=512.00MiB, used=128.00KiB',
                    'GlobalReserve, single: total=16.00MiB, used=0.00B', '']
     raid0_return = {'data': 'raid0', 'system': 'raid0',
                     'globalreserve': 'single', 'metadata': 'raid0'}
     raid1_fi_df = ['Data, RAID1: total=512.00MiB, used=192.00KiB',
                    'System, RAID1: total=32.00MiB, used=16.00KiB',
                    'Metadata, RAID1: total=256.00MiB, used=128.00KiB',
                    'GlobalReserve, single: total=16.00MiB, used=0.00B', '']
     raid1_return = {'data': 'raid1', 'system': 'raid1',
                     'globalreserve': 'single', 'metadata': 'raid1'}
     # Thanks to @grebnek in forum and GitHub for spotting this:
     # https://btrfs.wiki.kernel.org/index.php/FAQ#Why_do_I_have_.22single.22_chunks_in_my_RAID_filesystem.3F
     # When converting from single to another raid level it is normal for
     # a few chunks to remain in single until the next balance operation.
     raid1_fi_df_some_single_chunks = [
         'Data, RAID1: total=416.00MiB, used=128.00KiB',
         'Data, single: total=416.00MiB, used=0.00B',
         'System, RAID1: total=32.00MiB, used=16.00KiB',
         'Metadata, RAID1: total=512.00MiB, used=128.00KiB',
         'GlobalReserve, single: total=16.00MiB, used=0.00B', '']
     # for a time we incorrectly parsed the last btrfs fi df mount_point as
     # the following:
     raid1_some_single_chunks_return_broken = {'data': 'single',
                                               'system': 'raid1',
                                               'globalreserve': 'single',
                                               'metadata': 'raid1'}
     # but the expected result should be the same as "raid1_return" above
     # ie data raid1 not single.
     raid10_fi_df = ['Data, RAID10: total=419.75MiB, used=128.00KiB',
                     'System, RAID10: total=16.00MiB, used=16.00KiB',
                     'Metadata, RAID10: total=419.75MiB, used=128.00KiB',
                     'GlobalReserve, single: total=16.00MiB, used=0.00B', '']
     raid10_return = {'data': 'raid10', 'system': 'raid10',
                      'globalreserve': 'single', 'metadata': 'raid10'}
     raid5_fi_df = ['Data, RAID5: total=215.00MiB, used=128.00KiB',
                    'System, RAID5: total=8.00MiB, used=16.00KiB',
                    'Metadata, RAID5: total=215.00MiB, used=128.00KiB',
                    'GlobalReserve, single: total=16.00MiB, used=0.00B', '']
     raid5_return = {'data': 'raid5', 'system': 'raid5',
                     'globalreserve': 'single', 'metadata': 'raid5'}
     raid6_fi_df = ['Data, RAID6: total=211.62MiB, used=128.00KiB',
                    'System, RAID6: total=8.00MiB, used=16.00KiB',
                    'Metadata, RAID6: total=211.62MiB, used=128.00KiB',
                    'GlobalReserve, single: total=16.00MiB, used=0.00B', '']
     raid6_return = {'data': 'raid6', 'system': 'raid6',
                     'globalreserve': 'single', 'metadata': 'raid6'}
     # Data to test for correct recognition of the default rockstor_rockstor
     # pool ie:
     default_sys_fi_df = ['Data, single: total=3.37GiB, used=2.71GiB',
                          'System, DUP: total=8.00MiB, used=16.00KiB',
                          'System, single: total=4.00MiB, used=0.00B',
                          'Metadata, DUP: total=471.50MiB, used=165.80MiB',
                          'Metadata, single: total=8.00MiB, used=0.00B',
                          'GlobalReserve, single: total=64.00MiB, used=0.00B',
                          '']
     default_sys_return = {'data': 'single', 'system': 'dup',
                           'globalreserve': 'single', 'metadata': 'dup'}
     # N.B. prior to pr #1408 (as of writing this unit test) we had a
     # default_sys_return that was correct for data but not for system.
     default_sys_return_broken = {'data': 'single', 'system': 'single',
                                  'globalreserve': 'single',
                                  'metadata': 'single'}
     # list used to report what raid level is currently under test.
     raid_levels_tested = ['single', 'raid0', 'raid1', 'raid10', 'raid5',
                           'raid6', 'raid1_some_single_chunks',
                           'default_sys_pool']
     # list of example fi_df outputs in raid_levels_tested order
     btrfs_fi_di = [single_fi_df, raid0_fi_df, raid1_fi_df, raid10_fi_df,
                    raid5_fi_df, raid6_fi_df, raid1_fi_df_some_single_chunks,
                    default_sys_fi_df]
     # list of correctly parsed return dictionaries
     return_dict = [single_return, raid0_return, raid1_return, raid10_return,
                    raid5_return, raid6_return, raid1_return,
                    default_sys_return]
     # simple iteration over above example inputs to expected outputs.
     for raid_level, fi_df, expected_result in map(None, raid_levels_tested,
                                                   btrfs_fi_di, return_dict):
         # mock example command output with no error and rc=0
         self.mock_run_command.return_value = (fi_df, cmd_e, cmd_rc)
         # assert get_pool_raid_level returns what we expect.
         self.assertEqual(pool_raid(mount_point), expected_result,
                          msg='get_pool_raid_level() misidentified raid '
                              'level %s' % raid_level)
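Note that map(None, raid_levels_tested, btrfs_fi_di, return_dict) in the test loops above is a Python 2 idiom: it zips the lists, padding with None where lengths differ. A rough Python 3 equivalent, shown with stand-in lists rather than the test's own data:

 from itertools import zip_longest

 levels = ['single', 'raid0']
 outputs = [['Data, single: ...'], ['Data, RAID0: ...']]
 expected = [{'data': 'single'}, {'data': 'raid0'}]

 # zip() suffices when the lists are equal length, as in the test above;
 # zip_longest reproduces Python 2's None padding otherwise.
 for raid_level, fi_df, expected_result in zip_longest(levels, outputs, expected):
     print(raid_level, expected_result)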