Beispiel #1
0
 def _quotas(cls, request, pool):
     """Set the btrfs quota state ("Enabled"/"Disabled") on the given pool.

     Returns an empty Response when the requested state already matches
     the pool's current state; otherwise enables (plus rescan) or
     disables quotas and returns the serialized pool. Failures are
     reported via handle_exception.
     """
     new_quota_state = cls._validate_new_quota_state(request)
     # If no change from current pool quota state then do nothing.
     current_state = "Enabled" if pool.quotas_enabled else "Disabled"
     if new_quota_state == current_state:
         return Response()
     try:
         if new_quota_state == "Enabled":
             # Current issue with requiring enable to be executed twice !!!
             # As of 4.12.4-1.el7.elrepo.x86_64
             # this avoids "ERROR: quota rescan failed: Invalid argument"
             # when attempting a rescan.
             # Look similar to https://patchwork.kernel.org/patch/9928635/
             enable_quota(pool)
             enable_quota(pool)
             # As of 4.12.4-1.el7.elrepo.x86_64
             # The second above enable_quota() call currently initiates a
             # rescan so the following is redundant; however this may not
             # always be the case so leaving as it will auto skip if a scan
             # is in progress anyway.
             rescan_quotas(pool)
         else:
             disable_quota(pool)
     except Exception:
         # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
         # are no longer swallowed; error reporting is unchanged.
         e_msg = ("Failed to Enable (and rescan) / Disable Quotas for "
                  "Pool ({}). Requested quota state "
                  "was ({}).".format(pool.name, new_quota_state))
         handle_exception(Exception(e_msg), request)
     return Response(PoolInfoSerializer(pool).data)
Beispiel #2
0
 def get(self, *args, **kwargs):
     """Return serialized info for the pool identified by the 'pid' URL kwarg.

     Responds with HTTP 404 when no such pool exists.
     """
     try:
         matched = Pool.objects.get(id=self.kwargs["pid"])
         payload = PoolInfoSerializer(matched).data
         return Response(payload)
     except Pool.DoesNotExist:
         return Response(status=status.HTTP_404_NOT_FOUND)
Beispiel #3
0
 def get(self, *args, **kwargs):
     """Return refreshed, serialized state for the pool named by 'pname'.

     An unknown pool name yields an empty Response (not a 404).
     """
     try:
         target = Pool.objects.get(name=self.kwargs['pname'])
         refreshed = self._refresh_pool_state(target)
         return Response(PoolInfoSerializer(refreshed).data)
     except Pool.DoesNotExist:
         return Response()
Beispiel #4
0
    def _remount(cls, request, pool):
        """Apply new compression and mount options to a pool and remount it.

        Persists the validated settings on the pool model, then remounts
        the pool itself plus every mounted instance of its shares found in
        /proc/mounts. All remount failures are collected and reported as a
        single exception via handle_exception.
        """
        compression = cls._validate_compression(request)
        mnt_options = cls._validate_mnt_options(request)
        # No change requested: avoid needless remount churn.
        if (compression == pool.compression
                and mnt_options == pool.mnt_options):
            return Response()

        with transaction.atomic():
            pool.compression = compression
            pool.mnt_options = mnt_options
            pool.save()

        if re.search('noatime', mnt_options) is None:
            mnt_options = ('%s,relatime,atime' % mnt_options)

        if re.search('compress-force', mnt_options) is None:
            mnt_options = ('%s,compress=%s' % (mnt_options, compression))

        # Build share_name -> [mount points] from /proc/mounts so every
        # exported/mounted instance of a share gets remounted.
        with open('/proc/mounts') as mfo:
            mount_map = {}
            for line in mfo:
                if re.search('%s|%s' % (settings.NFS_EXPORT_ROOT,
                                        settings.MNT_PT), line) is not None:
                    share_name = line.split()[1].split('/')[2]
                elif re.search(settings.SFTP_MNT_ROOT, line) is not None:
                    # SFTP mounts nest one directory level deeper.
                    share_name = line.split()[1].split('/')[3]
                else:
                    continue
                mount_map.setdefault(share_name, []).append(line.split()[1])
        failed_remounts = []
        pool_mnt = '/mnt2/%s' % pool.name
        try:
            remount(pool_mnt, mnt_options)
        except Exception as e:
            # Was a bare "except:"; log the underlying failure instead of
            # silently discarding it (matches the share loop below).
            logger.exception(e)
            failed_remounts.append(pool_mnt)
        for share in mount_map.keys():
            if Share.objects.filter(pool=pool, name=share).exists():
                for m in mount_map[share]:
                    try:
                        remount(m, mnt_options)
                    except Exception as e:
                        logger.exception(e)
                        failed_remounts.append(m)
        if len(failed_remounts) > 0:
            e_msg = ('Failed to remount the following mounts.\n %s\n '
                     'Try again or do the following as root(may cause '
                     'downtime):\n 1. systemctl stop rockstor\n'
                     '2. unmount manually\n3. systemctl start rockstor\n.' %
                     failed_remounts)
            handle_exception(Exception(e_msg), request)
        return Response(PoolInfoSerializer(pool).data)
Beispiel #5
0
    def post(self, request):
        """
        Create a new pool.

        Input is a comma separated list of disks, a raid_level and the
        name of the pool. Validates name syntax, name uniqueness, disk
        existence, raid level and its minimum disk count before creating
        the btrfs pool and its database record.
        """
        try:
            disks = request.DATA['disks'].split(',')
            pname = request.DATA['pname']
            if re.match('%s$' % settings.POOL_REGEX, pname) is None:
                e_msg = ('Pool name must start with a letter(a-z) and can'
                         ' be followed by any of the following characters: '
                         'letter(a-z), digits(0-9), hyphen(-), underscore'
                         '(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if Pool.objects.filter(name=pname).exists():
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if not Disk.objects.filter(name=d).exists():
                    e_msg = ('Unknown disk: %s' % d)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if raid_level not in self.RAID_LEVELS:
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            # Minimum disk counts per raid level (indexes into RAID_LEVELS).
            if raid_level in self.RAID_LEVELS[0:2] and len(disks) == 1:
                e_msg = ('More than one disk is required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[2] and len(disks) < 4:
                e_msg = ('Four or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[4] and len(disks) < 2:
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[5] and len(disks) < 3:
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            p = Pool(name=pname, raid=raid_level)
            add_pool(pname, raid_level, raid_level, disks)
            usage = pool_usage(disks[0])
            p.size = usage[0]
            p.save()
            p.disk_set.add(*[Disk.objects.get(name=d) for d in disks])
            return Response(PoolInfoSerializer(p).data)
        except RockStorAPIException:
            raise
        except Exception as e:
            # Fixed Python-2-only "except Exception, e" syntax; "as" is
            # valid on both Python 2.6+ and Python 3.
            handle_exception(e, request)
Beispiel #6
0
    def _remount(cls, request, pool):
        """Apply new compression and mount options to a pool and remount it.

        Persists the validated settings, then remounts the pool itself plus
        every mounted instance of its shares found in /proc/mounts. Remount
        failures are collected and reported via handle_exception.
        """
        compression = cls._validate_compression(request)
        mnt_options = cls._validate_mnt_options(request)
        # No change requested: avoid needless remount churn.
        if compression == pool.compression and mnt_options == pool.mnt_options:
            return Response()

        with transaction.atomic():
            pool.compression = compression
            pool.mnt_options = mnt_options
            pool.save()

        if re.search("noatime", mnt_options) is None:
            mnt_options = "{},relatime,atime".format(mnt_options)

        if re.search("compress-force", mnt_options) is None:
            mnt_options = "{},compress={}".format(mnt_options, compression)

        # Build share_name -> [mount points] from /proc/mounts so every
        # exported/mounted instance of a share gets remounted.
        with open("/proc/mounts") as mfo:
            mount_map = {}
            for line in mfo:
                if (re.search(
                        "{}|{}".format(settings.NFS_EXPORT_ROOT,
                                       settings.MNT_PT), line) is not None):
                    share_name = line.split()[1].split("/")[2]
                elif re.search(settings.SFTP_MNT_ROOT, line) is not None:
                    # SFTP mounts nest one directory level deeper.
                    share_name = line.split()[1].split("/")[3]
                else:
                    continue
                mount_map.setdefault(share_name, []).append(line.split()[1])
        failed_remounts = []
        pool_mnt = "/mnt2/{}".format(pool.name)
        try:
            remount(pool_mnt, mnt_options)
        except Exception as e:
            # Was a bare "except:"; log the underlying failure instead of
            # silently discarding it (matches the share loop below).
            logger.exception(e)
            failed_remounts.append(pool_mnt)
        for share in mount_map.keys():
            if Share.objects.filter(pool=pool, name=share).exists():
                for m in mount_map[share]:
                    try:
                        remount(m, mnt_options)
                    except Exception as e:
                        logger.exception(e)
                        failed_remounts.append(m)
        if len(failed_remounts) > 0:
            e_msg = ("Failed to remount the following mounts.\n {}.\n "
                     "Try again or do the following as root (may cause "
                     "downtime):\n1. systemctl stop rockstor.\n"
                     "2. unmount manually.\n"
                     "3. systemctl start rockstor.\n").format(failed_remounts)
            handle_exception(Exception(e_msg), request)
        return Response(PoolInfoSerializer(pool).data)
Beispiel #7
0
    def put(self, request, pname, command):
        """
        Resize a pool.

        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        try:
            if not Pool.objects.filter(name=pname).exists():
                msg = ('pool: %s does not exist' % pname)
                raise Exception(msg)

            disks = request.DATA['disks'].split(',')
            if len(disks) == 0:
                msg = ('list of disks in the input is empty')
                raise Exception(msg)

            pool = Pool.objects.get(name=pname)
            # Any current member disk serves as the mount reference.
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            if command == 'add':
                for d in disks:
                    d_o = Disk.objects.get(name=d)
                    if d_o.pool is not None:
                        msg = ('disk %s already part of pool %s' %
                               (d, d_o.pool.name))
                        raise Exception(msg)
                    d_o.pool = pool
                    d_o.save()
                resize_pool(pool.name, mount_disk, disks)
            elif command == 'remove':
                if len(Disk.objects.filter(pool=pool)) == 1:
                    # Bug fix: the %s placeholder was never interpolated, so
                    # the message literally contained "%s".
                    msg = ('pool %s had only one disk. use delete command '
                           'instead' % pname)
                    raise Exception(msg)
                for d in disks:
                    d_o = Disk.objects.get(name=d)
                    if d_o.pool != pool:
                        msg = ('disk %s not part of pool %s' %
                               (d, d_o.pool.name))
                        raise Exception(msg)
                    d_o.pool = None
                    d_o.save()
                # Re-pick a mount disk from the remaining members, since the
                # original reference may be among those just removed.
                mount_disk = Disk.objects.filter(pool=pool)[0].name
                resize_pool(pool.name, mount_disk, disks, add=False)
            else:
                msg = ('unknown command: %s' % command)
                raise Exception(msg)
            usage = pool_usage(mount_disk)
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)

        except Exception as e:
            # Fixed Python-2-only "except Exception, e" syntax.
            handle_exception(e, request)
Beispiel #8
0
    def post(self, request):
        """
        Create a new pool.

        Input is a comma separated list of disks, a raid_level and the
        name of the pool. Validates name uniqueness, disk existence and
        the raid level's minimum disk count before creating the btrfs
        pool and its database record.
        """
        try:
            disks = request.DATA['disks'].split(',')
            pname = request.DATA['pname']

            if Pool.objects.filter(name=pname).exists():
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if not Disk.objects.filter(name=d).exists():
                    e_msg = ('Unknown disk: %s' % d)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if raid_level not in self.RAID_LEVELS:
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            # Minimum disk counts for the chosen raid level.
            if raid_level in self.RAID_LEVELS[0:2] and len(disks) == 1:
                e_msg = ('More than one disk is required for the chosen raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[2] and len(disks) < 4:
                # Typo fix: "chose" -> "chosen", matching the message above.
                e_msg = ('Four or more disks are required for the chosen raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            p = Pool(name=pname, raid=raid_level)
            add_pool(pname, raid_level, raid_level, disks)
            usage = pool_usage(disks[0])
            p.size = usage[0]
            p.save()
            p.disk_set.add(*[Disk.objects.get(name=d) for d in disks])
            return Response(PoolInfoSerializer(p).data)
        except RockStorAPIException:
            raise
        except Exception as e:
            # Fixed Python-2-only "except Exception, e" syntax.
            handle_exception(e, request)
Beispiel #9
0
    def post(self, request):
        """
        Create a new btrfs pool.

        Input is a list of disks, raid_level and name of the pool. The
        request is validated (name syntax/length/uniqueness, disk
        availability, raid-level disk-count minimums) before the pool is
        created on disk and recorded in the database. Returns the
        serialized new pool; all validation failures are raised via
        handle_exception.
        """
        with self._handle_exception(request):
            # Resolve each requested disk name to a Disk model; unknown
            # names are rejected inside _validate_disk.
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get("disks")
            ]
            pname = request.data["pname"]
            if re.match("{}$".format(settings.POOL_REGEX), pname) is None:
                e_msg = ("Invalid characters in pool name. Following "
                         "characters are allowed: letter(a-z or A-Z), "
                         "digit(0-9), "
                         "hyphen(-), underscore(_) or a period(.).")
                handle_exception(Exception(e_msg), request)

            if len(pname) > 255:
                e_msg = "Pool name must be less than 255 characters."
                handle_exception(Exception(e_msg), request)

            if Pool.objects.filter(name=pname).exists():
                e_msg = ("Pool ({}) already exists. Choose a different name."
                         ).format(pname)
                handle_exception(Exception(e_msg), request)

            # Pool names must not collide with existing share names.
            if Share.objects.filter(name=pname).exists():
                e_msg = ("A share with this name ({}) exists. Pool and share "
                         "names must be distinct. "
                         "Choose a different name.").format(pname)
                handle_exception(Exception(e_msg), request)

            # Refuse disks that already carry a btrfs filesystem.
            for d in disks:
                if d.btrfs_uuid is not None:
                    e_msg = ("Another BTRFS filesystem exists on this "
                             "disk ({}). "
                             "Erase the disk and try again.").format(d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data["raid_level"]
            if raid_level not in self.RAID_LEVELS:
                e_msg = ("Unsupported raid level. Use one of: {}.").format(
                    self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid 1 disk check
            if raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1:
                e_msg = (
                    "At least 2 disks are required for the raid level: {}."
                ).format(raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[3]:
                if len(disks) < 4:
                    e_msg = (
                        "A minimum of 4 drives are required for the raid level: {}."
                    ).format(raid_level)
                    handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[4] and len(disks) < 2:
                e_msg = ("2 or more disks are required for the raid level: {}."
                         ).format(raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[5] and len(disks) < 3:
                e_msg = ("3 or more disks are required for the raid level: {}."
                         ).format(raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            # NOTE(review): _role_filter_disk_names is defined elsewhere;
            # presumably maps the Disk models to usable device names — verify.
            dnames = self._role_filter_disk_names(disks, request)
            p = Pool(
                name=pname,
                raid=raid_level,
                compression=compression,
                mnt_options=mnt_options,
            )
            p.save()
            p.disk_set.add(*disks)
            # added for loop to save disks appears p.disk_set.add(*disks) was
            # not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            add_pool(p, dnames)
            p.size = p.usage_bound()
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            # Now we ensure udev info is updated via system wide trigger
            # as per pool resize add, only here it is for a new pool.
            trigger_udev_update()
            return Response(PoolInfoSerializer(p).data)
Beispiel #10
0
    def put(self, request, pid, command):
        """
        Resize a pool, or remount it with new options.

        @pid: pool's database id (arrives as a string from the URL).
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - re-apply compression / mount options
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except Exception:
                # Narrowed from a bare "except:". Also %s, not %d: pid is a
                # string URL kwarg, so %d raised TypeError while building
                # this very message.
                e_msg = ('Pool(%s) does not exist.' % pid)
                handle_exception(Exception(e_msg), request)

            if pool.role == 'root':
                e_msg = ('Edit operations are not allowed on this Pool(%s) '
                         'as it contains the operating system.' % pid)
                handle_exception(Exception(e_msg), request)

            if command == 'remount':
                return self._remount(request, pool)

            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks', [])
            ]
            num_new_disks = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            if command == 'add':
                # Each candidate disk must be free of any pool and of any
                # pre-existing btrfs filesystem.
                for d in disks:
                    if d.pool is not None:
                        e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                                 'because it belongs to another pool(%s)' %
                                 (d.name, pool.name, d.pool.name))
                        handle_exception(Exception(e_msg), request)
                    if d.btrfs_uuid is not None:
                        e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui' % d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid != 'single' and new_raid == 'single':
                    e_msg = ('Pool migration from %s to %s is not supported.' %
                             (pool.raid, new_raid))
                    handle_exception(Exception(e_msg), request)

                # Minimum member counts for the target raid level.
                if new_raid == 'raid10' and num_total_disks < 4:
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: raid10')
                    handle_exception(Exception(e_msg), request)

                if new_raid == 'raid6' and num_total_disks < 3:
                    e_msg = ('A minimum of Three drives are required for the '
                             'raid level: raid6')
                    handle_exception(Exception(e_msg), request)

                if new_raid == 'raid5' and num_total_disks < 2:
                    e_msg = ('A minimum of Two drives are required for the '
                             'raid level: raid5')
                    handle_exception(Exception(e_msg), request)

                # Only one balance may be active (or paused) per pool.
                if PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=r'(started|running|cancelling|pausing|paused)').exists():  # noqa E501
                    e_msg = ('A Balance process is already running or paused '
                             'for this pool(%s). Resize is not supported '
                             'during a balance process.' % pool.name)
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames)
                tid = self._balance_start(pool, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()
            elif command == 'remove':
                if new_raid != pool.raid:
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if d.pool is None or d.pool != pool:
                        e_msg = ('Disk(%s) cannot be removed because it does '
                                 'not belong to this Pool(%s)' %
                                 (d.name, pool.name))
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                # Minimum member counts for the pool's current raid level.
                if pool.raid == 'raid0':
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid(%s) configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)

                if pool.raid == 'raid1' and remaining_disks < 2:
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration(raid1) '
                             'requires a minimum of 2 disks')
                    handle_exception(Exception(e_msg), request)

                if pool.raid == 'raid10' and remaining_disks < 4:
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration(raid10) '
                             'requires a minimum of 4 disks')
                    handle_exception(Exception(e_msg), request)

                if pool.raid == 'raid5' and remaining_disks < 2:
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration(raid5) requires a '
                             'minimum of 2 disks')
                    handle_exception(Exception(e_msg), request)

                if pool.raid == 'raid6' and remaining_disks < 3:
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration(raid6) requires a '
                             'minimum of 3 disks')
                    handle_exception(Exception(e_msg), request)

                # Refuse a shrink that would not leave room for the pool's
                # currently used space.
                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.size
                if size_cut >= (pool.size - usage):
                    e_msg = ('Removing these(%s) disks may shrink the pool by '
                             '%dKB, which is greater than available free space'
                             ' %dKB. This is not supported.' %
                             (dnames, size_cut, usage))
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames, add=False)
                tid = self._balance_start(pool)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = ('command(%s) is not supported.' % command)
                handle_exception(Exception(e_msg), request)
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
Beispiel #11
0
    def post(self, request):
        """
        Create a new btrfs pool.

        Input is a list of disks, raid_level and name of the pool. The
        request is validated (name syntax/length/uniqueness, disk
        availability, raid-level disk-count minimums) before the pool is
        created on disk and recorded in the database. Returns the
        serialized new pool; validation failures are raised via
        handle_exception.
        """
        with self._handle_exception(request):
            # Resolve each requested disk name to a Disk model; unknown
            # names are rejected inside _validate_disk.
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            pname = request.data['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Invalid characters in Pool name. Following '
                         'characters are allowed: letter(a-z or A-Z), '
                         'digit(0-9), '
                         'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (len(pname) > 255):
                e_msg = ('Pool name must be less than 255 characters')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool(%s) already exists. Choose a different name' %
                         pname)
                handle_exception(Exception(e_msg), request)

            # Pool names must not collide with existing Share names.
            if (Share.objects.filter(name=pname).exists()):
                e_msg = ('A Share with this name(%s) exists. Pool and Share '
                         'names '
                         'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            # Refuse disks that already carry a btrfs filesystem.
            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.' %
                             d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: {}'.format(
                    self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid 1 disk check
            if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            # NOTE(review): _role_filter_disk_names is defined elsewhere;
            # presumably maps the Disk models to usable device names — verify.
            dnames = self._role_filter_disk_names(disks, request)
            p = Pool(name=pname,
                     raid=raid_level,
                     compression=compression,
                     mnt_options=mnt_options)
            p.save()
            p.disk_set.add(*disks)
            # added for loop to save disks appears p.disk_set.add(*disks) was
            # not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            add_pool(p, dnames)
            p.size = p.usage_bound()
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            # Now we ensure udev info is updated via system wide trigger
            # as per pool resize add, only here it is for a new pool.
            trigger_udev_update()
            return Response(PoolInfoSerializer(p).data)
Beispiel #12
0
class PoolMixin(object):
    """Shared helpers for pool API views: disk, compression and mount
    option validation, plus remount handling for an existing pool."""

    serializer_class = PoolInfoSerializer
    # Order matters: other code indexes into RAID_LEVELS positionally.
    RAID_LEVELS = ('single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6')

    @staticmethod
    def _validate_disk(d, request):
        """Return the Disk db object named *d*, or raise via
        handle_exception when no such disk exists."""
        try:
            return Disk.objects.get(name=d)
        except Disk.DoesNotExist:
            # Narrowed from a bare except: unrelated db errors now
            # propagate instead of being reported as a missing disk.
            e_msg = ('Disk(%s) does not exist' % d)
            handle_exception(Exception(e_msg), request)

    @staticmethod
    def _validate_compression(request):
        """Return the validated compression algorithm from the request.

        Defaults to 'no' when the field is absent or explicitly null;
        raises via handle_exception on an unsupported algorithm.
        """
        compression = request.data.get('compression', 'no')
        if (compression is None):
            # An explicit null in the request body also means 'no'.
            compression = 'no'
        if (compression not in settings.COMPRESSION_TYPES):
            e_msg = ('Unsupported compression algorithm(%s). Use one of '
                     '%s' % (compression, settings.COMPRESSION_TYPES))
            handle_exception(Exception(e_msg), request)
        return compression

    @staticmethod
    def _validate_mnt_options(request):
        """Validate a comma separated btrfs mount options string.

        Returns the string unchanged when valid, '' when absent; raises
        via handle_exception on unknown options or bad option values.
        """
        mnt_options = request.data.get('mnt_options', None)
        if (mnt_options is None):
            return ''
        # Allowed option -> expected value: None = flag (no value),
        # int = integer value required, sequence = enumerated values.
        allowed_options = {
            'alloc_start': int,
            'autodefrag': None,
            'clear_cache': None,
            'commit': int,
            'compress-force': settings.COMPRESSION_TYPES,
            'discard': None,
            'fatal_errors': None,
            'inode_cache': None,
            'max_inline': int,
            'metadata_ratio': int,
            'noacl': None,
            'nodatacow': None,
            'nodatasum': None,
            'nospace_cache': None,
            'space_cache': None,
            'ssd': None,
            'nossd': None,
            'ssd_spread': None,
            'thread_pool': int,
            'noatime': None,
            '': None,
        }
        o_fields = mnt_options.split(',')
        for o in o_fields:
            v = None
            if (re.search('=', o) is not None):
                # Split on the first '=' only, so a stray '=' in the
                # value cannot raise an unhandled unpack ValueError.
                o, v = o.split('=', 1)
            if (o not in allowed_options):
                e_msg = ('mount option(%s) not allowed. Make sure there are '
                         'no whitespaces in the input. Allowed options: %s' %
                         (o, allowed_options.keys()))
                handle_exception(Exception(e_msg), request)
            if ((o == 'compress-force'
                 and v not in allowed_options['compress-force'])):
                e_msg = ('compress-force is only allowed with {}'.format(
                    settings.COMPRESSION_TYPES))
                handle_exception(Exception(e_msg), request)
            # changed conditional from "if (type(allowed_options[o]) is int):"
            if (allowed_options[o] is int):
                try:
                    int(v)
                except (TypeError, ValueError):
                    # TypeError: value missing (v is None);
                    # ValueError: value present but not an integer.
                    e_msg = ('Value for mount option(%s) must be an integer' %
                             (o))
                    handle_exception(Exception(e_msg), request)
        return mnt_options

    @classmethod
    def _remount(cls, request, pool):
        """Apply requested compression / mount option changes to *pool*
        and remount the pool plus all of its share mounts.

        Returns an empty Response when nothing changed, otherwise the
        serialized pool; raises via handle_exception if any mount point
        fails to remount.
        """
        compression = cls._validate_compression(request)
        mnt_options = cls._validate_mnt_options(request)
        if ((compression == pool.compression
             and mnt_options == pool.mnt_options)):
            # No change requested: nothing to do.
            return Response()

        # Persist the new settings atomically before touching mounts.
        with transaction.atomic():
            pool.compression = compression
            pool.mnt_options = mnt_options
            pool.save()

        if (re.search('noatime', mnt_options) is None):
            mnt_options = ('%s,relatime,atime' % mnt_options)

        if (re.search('compress-force', mnt_options) is None):
            mnt_options = ('%s,compress=%s' % (mnt_options, compression))

        # Build share_name -> [mount points] from the live mount table
        # so every exported/sftp mount of this pool's shares is covered.
        with open('/proc/mounts') as mfo:
            mount_map = {}
            for l in mfo.readlines():
                share_name = None
                if (re.search(
                        '%s|%s' % (settings.NFS_EXPORT_ROOT, settings.MNT_PT),
                        l) is not None):
                    share_name = l.split()[1].split('/')[2]
                elif (re.search(settings.SFTP_MNT_ROOT, l) is not None):
                    share_name = l.split()[1].split('/')[3]
                else:
                    continue
                if (share_name not in mount_map):
                    mount_map[share_name] = [
                        l.split()[1],
                    ]
                else:
                    mount_map[share_name].append(l.split()[1])
        failed_remounts = []
        try:
            pool_mnt = '/mnt2/%s' % pool.name
            remount(pool_mnt, mnt_options)
        except Exception:
            # Best effort: record the failure and keep going so the
            # user gets one consolidated error at the end.
            failed_remounts.append(pool_mnt)
        for share in mount_map.keys():
            # Only remount mounts belonging to shares of this pool.
            if (Share.objects.filter(pool=pool, name=share).exists()):
                for m in mount_map[share]:
                    try:
                        remount(m, mnt_options)
                    except Exception as e:
                        # Fixed Python-2-only 'except Exception, e' syntax.
                        logger.exception(e)
                        failed_remounts.append(m)
        if (len(failed_remounts) > 0):
            e_msg = ('Failed to remount the following mounts.\n %s\n '
                     'Try again or do the following as root(may cause '
                     'downtime):\n 1. systemctl stop rockstor\n'
                     '2. unmount manually\n3. systemctl start rockstor\n.' %
                     failed_remounts)
            handle_exception(Exception(e_msg), request)
        return Response(PoolInfoSerializer(pool).data)
Beispiel #13
0
    def put(self, request, pid, command):
        """
        Resize or reconfigure a pool.

        @pid: database id of the pool.
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - remount the pool to apply changed mount options
                  'quotas' - change the pool's quota state
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except:
                # NOTE(review): bare except also reports unrelated db errors
                # as 'not found'; Pool.DoesNotExist would be more precise.
                e_msg = 'Pool with id ({}) does not exist.'.format(pid)
                handle_exception(Exception(e_msg), request)

            # The pool hosting the OS only accepts the 'quotas' command.
            if (pool.role == 'root' and command != 'quotas'):
                e_msg = ('Edit operations are not allowed on this pool ({}) '
                         'as it contains the operating '
                         'system.').format(pool.name)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            if (command == 'quotas'):
                return self._quotas(request, pool)

            # Resolve submitted disk names into Disk db objects; an
            # unknown name raises inside _validate_disk.
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks', [])
            ]
            num_new_disks = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            # raid_level defaults to the pool's current level (no change).
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            if (command == 'add'):
                for d in disks:
                    # A disk may only belong to one pool at a time.
                    if (d.pool is not None):
                        e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                                 'because it belongs to another pool ({})'
                                 '.').format(d.name, pool.name, d.pool.name)
                        handle_exception(Exception(e_msg), request)
                    # Refuse disks carrying a stale btrfs signature; the
                    # user must wipe them explicitly first.
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui.').format(d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid == 'single' and new_raid == 'raid10':
                    # TODO: Consider removing once we have better space calc.
                    # Avoid extreme raid level change upwards (space issues).
                    e_msg = ('Pool migration from {} to {} is not '
                             'supported.').format(pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                # Enforce the minimum member count of the target raid level.
                if (new_raid == 'raid10' and num_total_disks < 4):
                    e_msg = ('A minimum of 4 drives are required for the '
                             'raid level: raid10.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid6' and num_total_disks < 3):
                    e_msg = ('A minimum of 3 drives are required for the '
                             'raid level: raid6.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid5' and num_total_disks < 2):
                    e_msg = ('A minimum of 2 drives are required for the '
                             'raid level: raid5.')
                    handle_exception(Exception(e_msg), request)

                # Resizing while a balance is active/paused is unsupported.
                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=
                        r'(started|running|cancelling|pausing|paused)').exists(
                        )):  # noqa E501
                    e_msg = ('A Balance process is already running or paused '
                             'for this pool ({}). Resize is not supported '
                             'during a balance process.').format(pool.name)
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames)
                # During dev add we also offer raid level change, if selected
                # blanket apply '-f' to allow for reducing metadata integrity.
                force = False
                if new_raid != pool.raid:
                    force = True
                tid = self._balance_start(pool, force=force, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()
            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks.')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    # Only disks that are members of this pool can be removed.
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk ({}) cannot be removed because it does '
                                 'not belong to this '
                                 'pool ({}).').format(d.name, pool.name)
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                if (pool.raid == 'raid0'):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid ({}) configuration.').format(pool.raid)
                    handle_exception(Exception(e_msg), request)

                # Enforce the minimum member count of the current raid level
                # after removal.
                if (pool.raid == 'raid1' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid1) '
                             'requires a minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10' and remaining_disks < 4):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid10) '
                             'requires a minimum of 4 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid5' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid5) requires a '
                             'minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid6' and remaining_disks < 3):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid6) requires a '
                             'minimum of 3 disks.')
                    handle_exception(Exception(e_msg), request)

                # Reject a removal that would shrink the pool below its
                # currently used space (size_cut vs free = size - usage).
                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.size
                if size_cut >= (pool.size - usage):
                    e_msg = ('Removing disks ({}) may shrink the pool by '
                             '{} KB, which is greater than available free '
                             'space {} KB. This is '
                             'not supported.').format(dnames, size_cut, usage)
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames, add=False)
                tid = self._balance_start(pool)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                # Detach the removed disks from the pool in the db.
                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = 'Command ({}) is not supported.'.format(command)
                handle_exception(Exception(e_msg), request)
            # Refresh the recorded pool size after any membership change.
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
Beispiel #14
0
    def put(self, request, pname, command):
        """
        Resize a pool (legacy name-based variant).

        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - remount the pool to apply changed mount options
        """
        with self._handle_exception(request):
            # The OS (root) pool must never be edited.
            if (pname == settings.ROOT_POOL):
                e_msg = ('Edit operations are not allowed on this Pool(%s) '
                         'as it contains the operating system.' % pname)
                handle_exception(Exception(e_msg), request)
            try:
                pool = Pool.objects.get(name=pname)
            except:
                # NOTE(review): bare except also reports unrelated db errors
                # as 'not found'; Pool.DoesNotExist would be more precise.
                e_msg = ('Pool(%s) does not exist.' % pname)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            # Resolve submitted disk names into Disk db objects.
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            num_new_disks = len(disks)
            if (num_new_disks == 0):
                e_msg = ('List of disks in the input cannot be empty.')
                handle_exception(Exception(e_msg), request)
            dnames = [d.name for d in disks]
            # Any current member disk serves as the mount reference device.
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            # raid_level defaults to the pool's current level (no change).
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            # pool_usage() returns a tuple here: usage[0] is total size and
            # usage[2] is free space, per the calculations below.
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            # free_percent = (usage[2]/usage[0]) * 100
            # Multiply before dividing to keep precision under integer
            # division (Python 2 '/' truncates).
            free_percent = (usage[2] * 100) / usage[0]
            threshold_percent = self.ADD_THRESHOLD * 100
            if (command == 'add'):
                for d in disks:
                    # A disk may only belong to one pool at a time.
                    if (d.pool is not None):
                        e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                                 'because it belongs to another pool(%s)' %
                                 (d.name, pool.name, d.pool.name))
                        handle_exception(Exception(e_msg), request)
                    # Refuse disks carrying a stale btrfs signature.
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui' % d.name)
                        handle_exception(Exception(e_msg), request)
                # Only whitelisted raid migrations are permitted.
                if (new_raid not in self.SUPPORTED_MIGRATIONS[pool.raid]):
                    e_msg = ('Pool migration from %s to %s is not supported.' %
                             (pool.raid, new_raid))
                    handle_exception(Exception(e_msg), request)

                # Resizing during an active balance is unsupported.
                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=r'(started|running)').exists()):
                    e_msg = ('A Balance process is already running for this '
                             'pool(%s). Resize is not supported during a '
                             'balance process.' % pool.name)
                    handle_exception(Exception(e_msg), request)

                # Require ADD_THRESHOLD worth of free space before a resize.
                if (free_percent < threshold_percent):
                    e_msg = ('Resize is only supported when there is at least '
                             '%d percent free space available. But currently '
                             'only %d percent is free. Remove some data and '
                             'try again.' % (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)

                if (new_raid != pool.raid):
                    # Mirrored targets need at least a doubling of members.
                    if (((pool.raid in ('single', 'raid0'))
                         and new_raid in ('raid1', 'raid10'))):
                        cur_num_disks = num_total_disks - num_new_disks
                        if (num_new_disks < cur_num_disks):
                            e_msg = ('For single/raid0 to raid1/raid10 '
                                     'conversion, at least as many as present '
                                     'number of disks must be added. %d '
                                     'disks are provided, but at least %d are '
                                     'required.' %
                                     (num_new_disks, cur_num_disks))
                            handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames)
                balance_pid = balance_start(pool, mount_disk, convert=new_raid)
                # Track the balance by OS process id in this legacy version.
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()

            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    # Only disks that are members of this pool can be removed.
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk(%s) cannot be removed because it does '
                                 'not belong to this Pool(%s)' %
                                 (d.name, pool.name))
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                if (pool.raid in (
                        'raid0',
                        'single',
                )):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid(%s) configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid in (
                        'raid5',
                        'raid6',
                )):
                    e_msg = ('Disk removal is not supported for pools with '
                             'raid5/6 configuration')
                    handle_exception(Exception(e_msg), request)

                # Per-level removal constraints: batch size and minimum
                # remaining member count.
                if (pool.raid == 'raid10'):
                    if (num_new_disks != 2):
                        e_msg = ('Only two disks can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 4):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 4 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)

                elif (pool.raid == 'raid1'):
                    if (num_new_disks != 1):
                        e_msg = ('Only one disk can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 2):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 2 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)

                # For removal the free-space requirement is inverted.
                threshold_percent = 100 - threshold_percent
                if (free_percent < threshold_percent):
                    e_msg = ('Removing disks is only supported when there is '
                             'at least %d percent free space available. But '
                             'currently only %d percent is free. Remove some '
                             'data and try again.' %
                             (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames, add=False)
                balance_pid = balance_start(pool, mount_disk)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                # Detach the removed disks from the pool in the db.
                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = ('command(%s) is not supported.' % command)
                handle_exception(Exception(e_msg), request)
            # Refresh recorded size from the filesystem after the change.
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
Beispiel #15
0
    def put(self, request, pid, command):
        """
        Resize or reconfigure a pool.

        @pid: database id of the pool.
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - remount the pool, to apply changed mount options
                  'quotas' - request pool quota setting change
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except Pool.DoesNotExist:
                # Narrowed from a bare except: unrelated db errors now
                # propagate rather than being reported as 'not found'.
                e_msg = 'Pool with id ({}) does not exist.'.format(pid)
                handle_exception(Exception(e_msg), request)

            # The pool hosting the OS only accepts the 'quotas' command.
            if (pool.role == 'root' and command != 'quotas'):
                e_msg = ('Edit operations are not allowed on this pool ({}) '
                         'as it contains the operating '
                         'system.').format(pool.name)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            if (command == 'quotas'):
                # There is a pending btrfs change that allows for quota state
                # change on unmounted Volumes (pools).
                return self._quotas(request, pool)

            if not pool.is_mounted:
                e_msg = ('Pool member / raid edits require an active mount. '
                         'Please see the "Maintenance required" section.')
                handle_exception(Exception(e_msg), request)

            # The literal ['missing'] sentinel requests removal of whatever
            # btrfs considers missing; there is no db disk to validate.
            if command == 'remove' and \
                    request.data.get('disks', []) == ['missing']:
                disks = []
                logger.debug('Remove missing request skipping disk validation')
            else:
                disks = [
                    self._validate_disk_id(diskId, request)
                    for diskId in request.data.get('disks', [])
                ]

            num_disks_selected = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            # raid_level defaults to the pool's current level (no change).
            new_raid = request.data.get('raid_level', pool.raid)

            if (command == 'add'):
                # Only attached disks can be selected during an add operation.
                num_total_attached_disks = pool.disk_set.attached().count() \
                                  + num_disks_selected
                for d in disks:
                    # A disk may only belong to one pool at a time.
                    if (d.pool is not None):
                        e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                                 'because it belongs to another pool ({})'
                                 '.').format(d.name, pool.name, d.pool.name)
                        handle_exception(Exception(e_msg), request)
                    # Refuse disks carrying a stale btrfs signature; the
                    # user must wipe them explicitly first.
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui.').format(d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid == 'single' and new_raid == 'raid10':
                    # TODO: Consider removing once we have better space calc.
                    # Avoid extreme raid level change upwards (space issues).
                    e_msg = ('Pool migration from {} to {} is not '
                             'supported.').format(pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                # Enforce the minimum attached member count of the target
                # raid level.
                if (new_raid == 'raid10' and num_total_attached_disks < 4):
                    e_msg = ('A minimum of 4 drives are required for the '
                             'raid level: raid10.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid6' and num_total_attached_disks < 3):
                    e_msg = ('A minimum of 3 drives are required for the '
                             'raid level: raid6.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid5' and num_total_attached_disks < 2):
                    e_msg = ('A minimum of 2 drives are required for the '
                             'raid level: raid5.')
                    handle_exception(Exception(e_msg), request)

                # Resizing while a balance is active/paused is unsupported.
                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=
                        r'(started|running|cancelling|pausing|paused)').exists(
                        )):  # noqa E501
                    e_msg = ('A Balance process is already running or paused '
                             'for this pool ({}). Resize is not supported '
                             'during a balance process.').format(pool.name)
                    handle_exception(Exception(e_msg), request)

                # _resize_pool_start() add dev mode is quick so no async or tid
                self._resize_pool_start(pool, dnames)
                force = False
                # During dev add we also offer raid level change, if selected
                # blanket apply '-f' to allow for reducing metadata integrity.
                if new_raid != pool.raid:
                    force = True
                # Django-ztask initialization as balance is long running.
                tid = self._balance_start(pool, force=force, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()
            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks.')
                    handle_exception(Exception(e_msg), request)
                detached_disks_selected = 0
                for d in disks:  # to be removed
                    # Only disks that are members of this pool can be removed.
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk ({}) cannot be removed because it does '
                                 'not belong to this '
                                 'pool ({}).').format(d.name, pool.name)
                        handle_exception(Exception(e_msg), request)
                    if re.match('detached-', d.name) is not None:
                        detached_disks_selected += 1
                if detached_disks_selected >= 3:
                    # Artificial constraint but no current btrfs raid level yet
                    # allows for > 2 dev detached and we have a mounted vol.
                    # Fixed missing space between 'two' and 'detached' in the
                    # user-facing message.
                    e_msg = ('We currently only support removing two '
                             'detached disks at a time.')
                    handle_exception(Exception(e_msg), request)
                attached_disks_selected = (num_disks_selected -
                                           detached_disks_selected)
                remaining_attached_disks = (pool.disk_set.attached().count() -
                                            attached_disks_selected)
                if (pool.raid == 'raid0'):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid ({}) configuration.').format(pool.raid)
                    handle_exception(Exception(e_msg), request)

                # Enforce the minimum attached member count of the current
                # raid level after removal.
                if (pool.raid == 'raid1' and remaining_attached_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid1) '
                             'requires a minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10' and remaining_attached_disks < 4):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid10) '
                             'requires a minimum of 4 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid5' and remaining_attached_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid5) requires a '
                             'minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid6' and remaining_attached_disks < 3):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid6) requires a '
                             'minimum of 3 disks.')
                    handle_exception(Exception(e_msg), request)

                # Reject a removal that would shrink the pool below its
                # currently used space (size_cut vs free = size - usage).
                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.allocated
                if size_cut >= (pool.size - usage):
                    e_msg = ('Removing disks ({}) may shrink the pool by '
                             '{} KB, which is greater than available free '
                             'space {} KB. This is '
                             'not supported.').format(dnames, size_cut, usage)
                    handle_exception(Exception(e_msg), request)

                # Unlike resize_pool_start() with add=True a remove has an
                # implicit balance where the removed disks contents are
                # re-distributed across the remaining pool members.
                # This internal balance cannot currently be monitored by the
                # usual 'btrfs balance status /mnt_pt' command. So we have to
                # use our own mechanism to assess it's status.
                # Django-ztask initialization:
                tid = self._resize_pool_start(pool, dnames, add=False)
                ps = PoolBalance(pool=pool, tid=tid, internal=True)
                ps.save()

                # Setting disk.pool = None for all removed members is redundant
                # as our next disk scan will re-find them until such time as
                # our async task, and it's associated dev remove, has completed
                # it's internal balance. This can take hours.

            else:
                e_msg = 'Command ({}) is not supported.'.format(command)
                handle_exception(Exception(e_msg), request)
            # Refresh the recorded pool size after any membership change.
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
Beispiel #16
0
    def put(self, request, pid, command):
        """
        Resize a pool, remount it, or change its quota state.

        @pid: pool's database id
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - remount the pool, to apply changed mount options
                  'quotas' - request pool quota setting change
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except Pool.DoesNotExist:
                # Narrow catch: a bare except here would also mask DB errors
                # and programming mistakes as "pool does not exist".
                e_msg = "Pool with id ({}) does not exist.".format(pid)
                handle_exception(Exception(e_msg), request)

            if pool.role == "root" and command != "quotas":
                e_msg = ("Edit operations are not allowed on this pool ({}) "
                         "as it contains the operating "
                         "system.").format(pool.name)
                handle_exception(Exception(e_msg), request)

            if command == "remount":
                return self._remount(request, pool)

            if command == "quotas":
                # There is a pending btrfs change that allows for quota state
                # change on unmounted Volumes (pools).
                return self._quotas(request, pool)

            # Establish missing and detached disk removal request flag defaults:
            remove_missing_disk_request = False
            all_members_detached = False
            if command == "remove" and request.data.get("disks",
                                                        []) == ["missing"]:
                remove_missing_disk_request = True
            if (pool.disk_set.filter(name__startswith="detached-").count() ==
                    pool.disk_set.count()):
                all_members_detached = True

            if not pool.is_mounted:
                # If we are asked to remove the last disk in a pool and it's detached
                # then user has already been notified to not remove it if it's to be
                # re-attached. So skip our mount exception as not possible anyway unless
                # re-attached and we have already indicated that possible path.
                # All works accounts for all pool members in detached state.
                if all_members_detached:
                    logger.info(
                        "Skipping mount requirement: all pool's member are detached."
                    )
                else:
                    e_msg = (
                        "Pool member / raid edits require an active mount. "
                        'Please see the "Maintenance required" section.')
                    handle_exception(Exception(e_msg), request)

            if remove_missing_disk_request:
                disks = []
                logger.debug(
                    "Remove missing request, so skipping disk validation")
            else:
                disks = [
                    self._validate_disk_id(diskId, request)
                    for diskId in request.data.get("disks", [])
                ]

            num_disks_selected = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            new_raid = request.data.get("raid_level", pool.raid)

            if command == "add":
                # Only attached disks can be selected during an add operation.
                num_total_attached_disks = (pool.disk_set.attached().count() +
                                            num_disks_selected)
                for d in disks:
                    if d.pool is not None:
                        e_msg = ("Disk ({}) cannot be added to this pool ({}) "
                                 "because it belongs to another pool ({})"
                                 ".").format(d.name, pool.name, d.pool.name)
                        handle_exception(Exception(e_msg), request)
                    if d.btrfs_uuid is not None:
                        e_msg = ("Disk ({}) has a BTRFS filesystem from the "
                                 "past. If you really like to add it, wipe it "
                                 "from the Storage -> Disks screen of the "
                                 "web-ui.").format(d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid == "single" and new_raid == "raid10":
                    # TODO: Consider removing once we have better space calc.
                    # Avoid extreme raid level change upwards (space issues).
                    e_msg = ("Pool migration from {} to {} is not supported."
                             ).format(pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                if new_raid == "raid10" and num_total_attached_disks < 4:
                    e_msg = ("A minimum of 4 drives are required for the "
                             "raid level: raid10.")
                    handle_exception(Exception(e_msg), request)

                if new_raid == "raid6" and num_total_attached_disks < 3:
                    e_msg = ("A minimum of 3 drives are required for the "
                             "raid level: raid6.")
                    handle_exception(Exception(e_msg), request)

                if new_raid == "raid5" and num_total_attached_disks < 2:
                    e_msg = ("A minimum of 2 drives are required for the "
                             "raid level: raid5.")
                    handle_exception(Exception(e_msg), request)

                # Resize during an active/paused balance is unsupported.
                if PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=
                        r"(started|running|cancelling|pausing|paused)",
                ).exists():  # noqa E501
                    e_msg = ("A Balance process is already running or paused "
                             "for this pool ({}). Resize is not supported "
                             "during a balance process.").format(pool.name)
                    handle_exception(Exception(e_msg), request)

                # _resize_pool_start() add dev mode is quick so no async or tid
                self._resize_pool_start(pool, dnames)
                force = False
                # During dev add we also offer raid level change, if selected
                # blanket apply '-f' to allow for reducing metadata integrity.
                if new_raid != pool.raid:
                    force = True
                # Django-ztask initialization as balance is long running.
                tid = self._balance_start(pool, force=force, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()

            elif command == "remove":
                if new_raid != pool.raid:
                    e_msg = "Raid configuration cannot be changed while removing disks."
                    handle_exception(Exception(e_msg), request)
                detached_disks_selected = 0
                for d in disks:  # to be removed
                    if d.pool is None or d.pool != pool:
                        e_msg = ("Disk ({}) cannot be removed because it does "
                                 "not belong to this "
                                 "pool ({}).").format(d.name, pool.name)
                        handle_exception(Exception(e_msg), request)
                    if re.match("detached-", d.name) is not None:
                        detached_disks_selected += 1
                if detached_disks_selected >= 2:
                    # We translate the removal of a detached device into:
                    # "btrfs device delete missing mnt_pt"
                    # but only when appropriate, this removes the first 'missing' dev.
                    # A detached disk is not necessarily missing, but an indication of
                    # prior pool association.
                    e_msg = (
                        "Detached disk selection is limited to a single device. "
                        "If all Pool members are detached all will be removed "
                        "and their pool automatically deleted there after.")
                    handle_exception(Exception(e_msg), request)
                attached_disks_selected = num_disks_selected - detached_disks_selected
                remaining_attached_disks = (pool.disk_set.attached().count() -
                                            attached_disks_selected)
                # Add check for attempt to remove detached & attached disks concurrently
                if detached_disks_selected > 0 and attached_disks_selected > 0:
                    e_msg = (
                        "Mixed detached and attached disk selection is "
                        "not supported. Limit your selection to only attached "
                        "disks, or a single detached disk.")
                    handle_exception(Exception(e_msg), request)
                # Skip all further sanity checks when all members are detached.
                if not all_members_detached:
                    if pool.raid == "raid0":
                        e_msg = (
                            "Disks cannot be removed from a pool with this "
                            "raid ({}) configuration.").format(pool.raid)
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid1" and remaining_attached_disks < 2:
                        e_msg = ("Disks cannot be removed from this pool "
                                 "because its raid configuration (raid1) "
                                 "requires a minimum of 2 disks.")
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid10" and remaining_attached_disks < 4:
                        e_msg = ("Disks cannot be removed from this pool "
                                 "because its raid configuration (raid10) "
                                 "requires a minimum of 4 disks.")
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid5" and remaining_attached_disks < 2:
                        e_msg = (
                            "Disks cannot be removed from this pool because "
                            "its raid configuration (raid5) requires a "
                            "minimum of 2 disks.")
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid6" and remaining_attached_disks < 3:
                        e_msg = (
                            "Disks cannot be removed from this pool because "
                            "its raid configuration (raid6) requires a "
                            "minimum of 3 disks.")
                        handle_exception(Exception(e_msg), request)

                    # Refuse removals that would shrink the pool below usage.
                    usage = pool_usage("/{}/{}".format(settings.MNT_PT,
                                                       pool.name))
                    size_cut = 0
                    for d in disks:  # to be removed
                        size_cut += d.allocated
                    available_free = pool.size - usage
                    if size_cut >= available_free:
                        e_msg = ("Removing disks ({}) may shrink the pool by "
                                 "{} KB, which is greater than available free "
                                 "space {} KB. This is "
                                 "not supported.").format(
                                     dnames, size_cut, available_free)
                        handle_exception(Exception(e_msg), request)

                    # Unlike resize_pool_start() with add=True a remove has an
                    # implicit balance where the removed disks contents are
                    # re-distributed across the remaining pool members.
                    # This internal balance cannot currently be monitored by the
                    # usual 'btrfs balance status /mnt_pt' command. So we have to
                    # use our own mechanism to assess it's status.
                    # Django-ztask initialization:
                    tid = self._resize_pool_start(pool, dnames, add=False)
                    ps = PoolBalance(pool=pool, tid=tid, internal=True)
                    ps.save()
                    # Setting disk.pool = None for all removed members is redundant
                    # as our next disk scan will re-find them until such time as
                    # our async task, and it's associated dev remove, has completed
                    # it's internal balance. This can take hours. Except for db only
                    # event of all_members_detached.

                else:  # all_members_detached:
                    # If all members are detached then delete pool associations for all.
                    # We cannot mount and so cannot perform any resize or any further
                    # pool member validation anyway.
                    # N.B. on next pool refresh, no members leads to pool removal.
                    for d in pool.disk_set.all():
                        d.pool = None
                        d.save()

            else:
                e_msg = "Command ({}) is not supported.".format(command)
                handle_exception(Exception(e_msg), request)
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
Beispiel #17
0
    def post(self, request):
        """
        Create a new pool from a list of disks, a raid_level and a pool name.
        """
        with self._handle_exception(request):
            # Validate every requested disk up front; any failure aborts.
            members = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            pname = request.data['pname']

            # Pool names are constrained by the project-wide regex.
            if re.match('{}$'.format(settings.POOL_REGEX), pname) is None:
                e_msg = ('Pool name must start with a alphanumeric(a-z0-9) '
                         'character and can be followed by any of the '
                         'following characters: letter(a-z), digits(0-9), '
                         'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            # Pool and Share namespaces must not collide.
            if Pool.objects.filter(name=pname).exists():
                e_msg = ('Pool(%s) already exists. Choose a different name' %
                         pname)
                handle_exception(Exception(e_msg), request)
            if Share.objects.filter(name=pname).exists():
                e_msg = (
                    'A Share with this name(%s) exists. Pool and Share names '
                    'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            # Refuse disks that still carry a prior btrfs filesystem.
            for member in members:
                if member.btrfs_uuid is not None:
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.' %
                             member.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if raid_level not in self.RAID_LEVELS:
                e_msg = ('Unsupported raid level. use one of: {}'.format(
                    self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # Minimum member counts per raid level.
            # Consolidated raid0 & raid1 disk check:
            if raid_level in self.RAID_LEVELS[1:3] and len(members) <= 1:
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[3] and len(members) < 4:
                e_msg = ('A minimum of Four drives are required for the '
                         'raid level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[4] and len(members) < 2:
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[5] and len(members) < 3:
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            member_names = [member.name for member in members]
            new_pool = Pool(name=pname,
                            raid=raid_level,
                            compression=compression,
                            mnt_options=mnt_options)
            # Create the on-disk pool, then record size/uuid from the first
            # member device.
            add_pool(new_pool, member_names)
            new_pool.size = pool_usage(mount_root(new_pool,
                                                  member_names[0]))[0]
            new_pool.uuid = btrfs_uuid(member_names[0])
            new_pool.disk_set.add(*members)
            new_pool.save()
            # Explicit per-disk save: disk_set.add(*members) was observed not
            # to persist disks in the test environment.
            for member in members:
                member.pool = new_pool
                member.save()
            return Response(PoolInfoSerializer(new_pool).data)