def post(self, request, pid, command=None):
    """Report balance status for a pool, or start a new balance.

    With command == 'status' the latest balance state is serialized and
    returned. With no command a balance is started; a truthy 'force' in
    the request body first marks any started/running balance record as
    terminated instead of refusing the request.
    """
    pool = self._validate_pool(pid, request)
    if command not in (None, 'status'):
        e_msg = ('Unknown balance command: %s' % command)
        handle_exception(Exception(e_msg), request)
    with self._handle_exception(request):
        current = self._balance_status(pool)
        if command == 'status':
            return Response(PoolBalanceSerializer(current).data)
        force = request.data.get('force', False)
        live = PoolBalance.objects.filter(
            pool=pool, status__regex=r'(started|running)')
        if live.exists():
            if force:
                # Supersede the newest live record so a fresh balance
                # can be started.
                newest = live.order_by('-id')[0]
                newest.status = 'terminated'
                newest.save()
            else:
                e_msg = ('A Balance process is already running for '
                         'pool(%s).' % pool.name)
                handle_exception(Exception(e_msg), request)
        tid = self._balance_start(pool, force=force)
        record = PoolBalance(pool=pool, tid=tid)
        record.save()
        return Response(PoolBalanceSerializer(record).data)
def post(self, request, pid, command=None):
    """Serve balance status queries and balance start requests.

    command 'status' returns the latest recorded balance state; any other
    non-None command is rejected. Otherwise a balance is started and a
    new PoolBalance row created; request data 'force' terminates an
    existing started/running balance record first.
    """
    pool = self._validate_pool(pid, request)
    if command is not None and command != "status":
        e_msg = "Unknown balance command ({}).".format(command)
        handle_exception(Exception(e_msg), request)
    with self._handle_exception(request):
        latest_state = self._balance_status(pool)
        if command == "status":
            return Response(PoolBalanceSerializer(latest_state).data)
        force = request.data.get("force", False)
        running = PoolBalance.objects.filter(
            pool=pool, status__regex=r"(started|running)")
        if running.exists():
            if not force:
                e_msg = (
                    "A Balance process is already running for pool ({})."
                ).format(pool.name)
                handle_exception(Exception(e_msg), request)
            else:
                # 'force': mark the most recent live record terminated.
                stale = running.order_by("-id")[0]
                stale.status = "terminated"
                stale.save()
        task_id = self._balance_start(pool, force=force)
        new_record = PoolBalance(pool=pool, tid=task_id)
        new_record.save()
        return Response(PoolBalanceSerializer(new_record).data)
def post(self, request, pname, command=None):
    """Legacy (name-keyed) balance endpoint: status query or balance start.

    Uses the pool's first member disk for the status call and the
    module-level balance_start() helper (process-id based). Note the
    legacy DRF request.DATA accessor is preserved as-is.
    """
    pool = self._validate_pool(pname, request)
    if command not in (None, 'status'):
        e_msg = ('Unknown balance command: %s' % command)
        handle_exception(Exception(e_msg), request)
    with self._handle_exception(request):
        member = Disk.objects.filter(pool=pool)[0]
        state = self._balance_status(pool, member)
        if command == 'status':
            return Response(PoolBalanceSerializer(state).data)
        force = request.DATA.get('force', False)
        live = PoolBalance.objects.filter(
            pool=pool, status__regex=r'(started|running)')
        if live.exists():
            if force:
                # Supersede the newest live record before restarting.
                newest = live.order_by('-id')[0]
                newest.status = 'terminated'
                newest.save()
            else:
                e_msg = ('A Balance process is already running for '
                         'pool(%s).' % pname)
                handle_exception(Exception(e_msg), request)
        balance_pid = balance_start(pool, member.name, force=force)
        record = PoolBalance(pool=pool, pid=balance_pid)
        record.save()
        return Response(PoolBalanceSerializer(record).data)
def put(self, request, pid, command):
    """Resize or re-configure a pool.

    :param request: DRF request. request.data may carry 'disks' (list of
        disk ids, or ['missing'] for a remove) and 'raid_level'.
    :param pid: database id of the target pool.
    :param command: one of:
        'add'     - add a list of disks and hence expand the pool
        'remove'  - remove a list of disks and hence shrink the pool
        'remount' - remount the pool, to apply changed mount options
        'quotas'  - request pool quota setting change
    :return: Response wrapping the serialized, updated pool.
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        except Pool.DoesNotExist:
            # Narrowed from a bare 'except:' - only a missing pool is an
            # expected failure here.
            e_msg = "Pool with id ({}) does not exist.".format(pid)
            handle_exception(Exception(e_msg), request)
        if pool.role == "root" and command != "quotas":
            e_msg = ("Edit operations are not allowed on this pool ({}) "
                     "as it contains the operating "
                     "system.").format(pool.name)
            handle_exception(Exception(e_msg), request)
        if command == "remount":
            return self._remount(request, pool)
        if command == "quotas":
            # There is a pending btrfs change that allows for quota state
            # change on unmounted Volumes (pools).
            return self._quotas(request, pool)
        # Establish missing and detached disk removal request flag defaults:
        remove_missing_disk_request = False
        all_members_detached = False
        if command == "remove" and request.data.get("disks", []) == ["missing"]:
            remove_missing_disk_request = True
        if (pool.disk_set.filter(name__startswith="detached-").count()
                == pool.disk_set.count()):
            all_members_detached = True
        if not pool.is_mounted:
            # If we are asked to remove the last disk in a pool and it's
            # detached then user has already been notified to not remove it
            # if it's to be re-attached. So skip our mount exception as not
            # possible anyway unless re-attached and we have already
            # indicated that possible path.
            # All works accounts for all pool members in detached state.
            if all_members_detached:
                logger.info(
                    "Skipping mount requirement: all pool's member are detached."
                )
            else:
                e_msg = (
                    "Pool member / raid edits require an active mount. "
                    'Please see the "Maintenance required" section.')
                handle_exception(Exception(e_msg), request)
        if remove_missing_disk_request:
            disks = []
            logger.debug(
                "Remove missing request, so skipping disk validation")
        else:
            disks = [
                self._validate_disk_id(diskId, request)
                for diskId in request.data.get("disks", [])
            ]
        num_disks_selected = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get("raid_level", pool.raid)
        if command == "add":
            # Only attached disks can be selected during an add operation.
            num_total_attached_disks = (pool.disk_set.attached().count()
                                        + num_disks_selected)
            for d in disks:
                if d.pool is not None:
                    e_msg = ("Disk ({}) cannot be added to this pool ({}) "
                             "because it belongs to another pool ({})"
                             ".").format(d.name, pool.name, d.pool.name)
                    handle_exception(Exception(e_msg), request)
                if d.btrfs_uuid is not None:
                    e_msg = ("Disk ({}) has a BTRFS filesystem from the "
                             "past. If you really like to add it, wipe it "
                             "from the Storage -> Disks screen of the "
                             "web-ui.").format(d.name)
                    handle_exception(Exception(e_msg), request)
            if pool.raid == "single" and new_raid == "raid10":
                # TODO: Consider removing once we have better space calc.
                # Avoid extreme raid level change upwards (space issues).
                e_msg = ("Pool migration from {} to {} is not supported."
                         ).format(pool.raid, new_raid)
                handle_exception(Exception(e_msg), request)
            if new_raid == "raid10" and num_total_attached_disks < 4:
                e_msg = ("A minimum of 4 drives are required for the "
                         "raid level: raid10.")
                handle_exception(Exception(e_msg), request)
            if new_raid == "raid6" and num_total_attached_disks < 3:
                e_msg = ("A minimum of 3 drives are required for the "
                         "raid level: raid6.")
                handle_exception(Exception(e_msg), request)
            if new_raid == "raid5" and num_total_attached_disks < 2:
                e_msg = ("A minimum of 2 drives are required for the "
                         "raid level: raid5.")
                handle_exception(Exception(e_msg), request)
            if PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=r"(started|running|cancelling|pausing|paused)",
            ).exists():  # noqa E501
                e_msg = ("A Balance process is already running or paused "
                         "for this pool ({}). Resize is not supported "
                         "during a balance process.").format(pool.name)
                handle_exception(Exception(e_msg), request)
            # _resize_pool_start() add dev mode is quick so no async or tid
            self._resize_pool_start(pool, dnames)
            force = False
            # During dev add we also offer raid level change, if selected
            # blanket apply '-f' to allow for reducing metadata integrity.
            if new_raid != pool.raid:
                force = True
            # Django-ztask initialization as balance is long running.
            tid = self._balance_start(pool, force=force, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif command == "remove":
            if new_raid != pool.raid:
                e_msg = "Raid configuration cannot be changed while removing disks."
                handle_exception(Exception(e_msg), request)
            detached_disks_selected = 0
            for d in disks:  # to be removed
                if d.pool is None or d.pool != pool:
                    e_msg = ("Disk ({}) cannot be removed because it does "
                             "not belong to this "
                             "pool ({}).").format(d.name, pool.name)
                    handle_exception(Exception(e_msg), request)
                if re.match("detached-", d.name) is not None:
                    detached_disks_selected += 1
            if detached_disks_selected >= 2:
                # We translate the removal of a detached device into:
                # "btrfs device delete missing mnt_pt"
                # but only when appropriate, this removes the first
                # 'missing' dev.
                # A detached disk is not necessarily missing, but an
                # indication of prior pool association.
                e_msg = (
                    "Detached disk selection is limited to a single device. "
                    "If all Pool members are detached all will be removed "
                    "and their pool automatically deleted there after.")
                handle_exception(Exception(e_msg), request)
            attached_disks_selected = (num_disks_selected
                                       - detached_disks_selected)
            remaining_attached_disks = (pool.disk_set.attached().count()
                                        - attached_disks_selected)
            # Add check for attempt to remove detached & attached disks
            # concurrently.
            if detached_disks_selected > 0 and attached_disks_selected > 0:
                e_msg = (
                    "Mixed detached and attached disk selection is "
                    "not supported. Limit your selection to only attached "
                    "disks, or a single detached disk.")
                handle_exception(Exception(e_msg), request)
            # Skip all further sanity checks when all members are detached.
            if not all_members_detached:
                if pool.raid == "raid0":
                    e_msg = (
                        "Disks cannot be removed from a pool with this "
                        "raid ({}) configuration.").format(pool.raid)
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid1" and remaining_attached_disks < 2:
                    e_msg = ("Disks cannot be removed from this pool "
                             "because its raid configuration (raid1) "
                             "requires a minimum of 2 disks.")
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid10" and remaining_attached_disks < 4:
                    e_msg = ("Disks cannot be removed from this pool "
                             "because its raid configuration (raid10) "
                             "requires a minimum of 4 disks.")
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid5" and remaining_attached_disks < 2:
                    e_msg = (
                        "Disks cannot be removed from this pool because "
                        "its raid configuration (raid5) requires a "
                        "minimum of 2 disks.")
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid6" and remaining_attached_disks < 3:
                    e_msg = (
                        "Disks cannot be removed from this pool because "
                        "its raid configuration (raid6) requires a "
                        "minimum of 3 disks.")
                    handle_exception(Exception(e_msg), request)
                usage = pool_usage("/{}/{}".format(settings.MNT_PT,
                                                   pool.name))
                size_cut = 0
                for d in disks:  # to be removed
                    size_cut += d.allocated
                available_free = pool.size - usage
                if size_cut >= available_free:
                    e_msg = ("Removing disks ({}) may shrink the pool by "
                             "{} KB, which is greater than available free "
                             "space {} KB. This is "
                             "not supported.").format(
                                 dnames, size_cut, available_free)
                    handle_exception(Exception(e_msg), request)
                # Unlike resize_pool_start() with add=True a remove has an
                # implicit balance where the removed disks contents are
                # re-distributed across the remaining pool members.
                # This internal balance cannot currently be monitored by
                # the usual 'btrfs balance status /mnt_pt' command. So we
                # have to use our own mechanism to assess it's status.
                # Django-ztask initialization:
                tid = self._resize_pool_start(pool, dnames, add=False)
                ps = PoolBalance(pool=pool, tid=tid, internal=True)
                ps.save()
                # Setting disk.pool = None for all removed members is
                # redundant as our next disk scan will re-find them until
                # such time as our async task, and it's associated dev
                # remove, has completed it's internal balance. This can
                # take hours. Except for db only event of
                # all_members_detached.
            else:  # all_members_detached:
                # If all members are detached then delete pool associations
                # for all. We cannot mount and so cannot perform any resize
                # or any further pool member validation anyway.
                # N.B. on next pool refresh, no members leads to pool
                # removal.
                for d in pool.disk_set.all():
                    d.pool = None
                    d.save()
        else:
            e_msg = "Command ({}) is not supported.".format(command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pname, command):
    """Resize a pool (legacy name-keyed endpoint).

    :param pname: pool's name.
    :param command:
        'add'     - add a list of disks and hence expand the pool
        'remove'  - remove a list of disks and hence shrink the pool
        'remount' - remount the pool (handled before add/remove).
    :return: Response wrapping the serialized, updated pool.
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(name=pname)
        except Pool.DoesNotExist:
            # Narrowed from a bare 'except:' - only a missing pool is an
            # expected failure here.
            e_msg = ('Pool(%s) does not exist.' % pname)
            handle_exception(Exception(e_msg), request)
        if (pool.role == 'root'):
            e_msg = ('Edit operations are not allowed on this Pool(%s) '
                     'as it contains the operating system.' % pname)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        disks = [self._validate_disk(d, request)
                 for d in request.data.get('disks', [])]
        num_new_disks = len(disks)
        dnames = [d.name for d in disks]
        new_raid = request.data.get('raid_level', pool.raid)
        num_total_disks = (Disk.objects.filter(pool=pool).count() +
                           num_new_disks)
        if (command == 'add'):
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                             'because it belongs to another pool(%s)' %
                             (d.name, pool.name, d.pool.name))
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui' % d.name)
                    handle_exception(Exception(e_msg), request)
            if (new_raid == 'single'):
                e_msg = ('Pool migration from %s to %s is not supported.'
                         % (pool.raid, new_raid))
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_disks < 4):
                e_msg = ('A minimum of Four drives are required for the '
                         'raid level: raid10')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_disks < 3):
                e_msg = ('A minimum of Three drives are required for the '
                         'raid level: raid6')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_disks < 2):
                # Fixed: was 'e_msg == (...)' - a no-op comparison that
                # left e_msg holding the previous (or no) message, so the
                # wrong error was raised for the raid5 minimum-drive case.
                e_msg = ('A minimum of Two drives are required for the '
                         'raid level: raid5')
                handle_exception(Exception(e_msg), request)
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=r'(started|running)').exists()):
                e_msg = ('A Balance process is already running for this '
                         'pool(%s). Resize is not supported during a '
                         'balance process.' % pool.name)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames)
            tid = self._balance_start(pool, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks')
                handle_exception(Exception(e_msg), request)
            for d in disks:
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk(%s) cannot be removed because it does '
                             'not belong to this Pool(%s)' %
                             (d.name, pool.name))
                    handle_exception(Exception(e_msg), request)
            remaining_disks = (Disk.objects.filter(pool=pool).count() -
                               num_new_disks)
            if (pool.raid in ('raid0', 'single',)):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid(%s) configuration' % pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid1) '
                         'requires a minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid10) '
                         'requires a minimum of 4 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid5) requires a '
                         'minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid6) requires a '
                         'minimum of 3 disks')
                handle_exception(Exception(e_msg), request)
            # pool_usage() here returns a tuple; index 2 is free space.
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.size
            if (size_cut >= usage[2]):
                e_msg = ('Removing these(%s) disks may shrink the pool by '
                         '%dKB, which is greater than available free space'
                         ' %dKB. This is not supported.' %
                         (dnames, size_cut, usage[2]))
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames, add=False)
            tid = self._balance_start(pool)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            for d in disks:
                d.pool = None
                d.save()
        else:
            e_msg = ('command(%s) is not supported.' % command)
            handle_exception(Exception(e_msg), request)
        usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
        pool.size = usage[0]
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pid, command):
    """Resize a pool (id-keyed variant).

    :param pid: database id of the target pool (arrives as a string URL
        kwarg).
    :param command:
        'add'     - add a list of disks and hence expand the pool
        'remove'  - remove a list of disks and hence shrink the pool
        'remount' - remount the pool (handled before add/remove).
    :return: Response wrapping the serialized, updated pool.
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        except Pool.DoesNotExist:
            # Narrowed from a bare 'except:'. Also fixed '%d' -> '%s':
            # pid is a string URL kwarg, so '%d' raised TypeError and
            # masked the intended error message.
            e_msg = ('Pool(%s) does not exist.' % pid)
            handle_exception(Exception(e_msg), request)
        if (pool.role == 'root'):
            e_msg = ('Edit operations are not allowed on this Pool(%s) '
                     'as it contains the operating system.' % pid)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        disks = [
            self._validate_disk(d, request)
            for d in request.data.get('disks', [])
        ]
        num_new_disks = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get('raid_level', pool.raid)
        num_total_disks = (Disk.objects.filter(pool=pool).count() +
                           num_new_disks)
        if (command == 'add'):
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                             'because it belongs to another pool(%s)' %
                             (d.name, pool.name, d.pool.name))
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui' % d.name)
                    handle_exception(Exception(e_msg), request)
            if (pool.raid != 'single' and new_raid == 'single'):
                e_msg = ('Pool migration from %s to %s is not supported.'
                         % (pool.raid, new_raid))
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_disks < 4):
                e_msg = ('A minimum of Four drives are required for the '
                         'raid level: raid10')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_disks < 3):
                e_msg = ('A minimum of Three drives are required for the '
                         'raid level: raid6')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_disks < 2):
                e_msg = ('A minimum of Two drives are required for the '
                         'raid level: raid5')
                handle_exception(Exception(e_msg), request)
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=r'(started|running|cancelling|pausing|paused)').exists()):  # noqa E501
                e_msg = ('A Balance process is already running or paused '
                         'for this pool(%s). Resize is not supported '
                         'during a balance process.' % pool.name)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames)
            tid = self._balance_start(pool, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks')
                handle_exception(Exception(e_msg), request)
            for d in disks:
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk(%s) cannot be removed because it does '
                             'not belong to this Pool(%s)' %
                             (d.name, pool.name))
                    handle_exception(Exception(e_msg), request)
            remaining_disks = (Disk.objects.filter(pool=pool).count() -
                               num_new_disks)
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid(%s) configuration' % pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid1) '
                         'requires a minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid10) '
                         'requires a minimum of 4 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid5) requires a '
                         'minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid6) requires a '
                         'minimum of 3 disks')
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.size
            # Fixed: the message previously formatted 'usage' (space used)
            # as the "available free space"; report the actual free space,
            # matching the condition being tested.
            available_free = pool.size - usage
            if size_cut >= available_free:
                e_msg = ('Removing these(%s) disks may shrink the pool by '
                         '%dKB, which is greater than available free space'
                         ' %dKB. This is not supported.' %
                         (dnames, size_cut, available_free))
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames, add=False)
            tid = self._balance_start(pool)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            for d in disks:
                d.pool = None
                d.save()
        else:
            e_msg = ('command(%s) is not supported.' % command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pid, command):
    """Resize or re-configure a pool.

    :param pid: database id of the target pool.
    :param command: one of:
        'add'     - add a list of disks and hence expand the pool
        'remove'  - remove a list of disks and hence shrink the pool
        'remount' - remount the pool, to apply changed mount options
        'quotas'  - request pool quota setting change
    :return: Response wrapping the serialized, updated pool.
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        except Pool.DoesNotExist:
            # Narrowed from a bare 'except:' - only a missing pool is an
            # expected failure here.
            e_msg = 'Pool with id ({}) does not exist.'.format(pid)
            handle_exception(Exception(e_msg), request)
        if (pool.role == 'root' and command != 'quotas'):
            e_msg = ('Edit operations are not allowed on this pool ({}) '
                     'as it contains the operating '
                     'system.').format(pool.name)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        if (command == 'quotas'):
            # There is a pending btrfs change that allows for quota state
            # change on unmounted Volumes (pools).
            return self._quotas(request, pool)
        if not pool.is_mounted:
            e_msg = ('Pool member / raid edits require an active mount. '
                     'Please see the "Maintenance required" section.')
            handle_exception(Exception(e_msg), request)
        if command == 'remove' and \
                request.data.get('disks', []) == ['missing']:
            disks = []
            logger.debug('Remove missing request skipping disk validation')
        else:
            disks = [
                self._validate_disk_id(diskId, request)
                for diskId in request.data.get('disks', [])
            ]
        num_disks_selected = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get('raid_level', pool.raid)
        if (command == 'add'):
            # Only attached disks can be selected during an add operation.
            num_total_attached_disks = pool.disk_set.attached().count() \
                + num_disks_selected
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                             'because it belongs to another pool ({})'
                             '.').format(d.name, pool.name, d.pool.name)
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui.').format(d.name)
                    handle_exception(Exception(e_msg), request)
            if pool.raid == 'single' and new_raid == 'raid10':
                # TODO: Consider removing once we have better space calc.
                # Avoid extreme raid level change upwards (space issues).
                e_msg = ('Pool migration from {} to {} is not '
                         'supported.').format(pool.raid, new_raid)
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_attached_disks < 4):
                e_msg = ('A minimum of 4 drives are required for the '
                         'raid level: raid10.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_attached_disks < 3):
                e_msg = ('A minimum of 3 drives are required for the '
                         'raid level: raid6.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_attached_disks < 2):
                e_msg = ('A minimum of 2 drives are required for the '
                         'raid level: raid5.')
                handle_exception(Exception(e_msg), request)
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=r'(started|running|cancelling|pausing|paused)').exists()):  # noqa E501
                e_msg = ('A Balance process is already running or paused '
                         'for this pool ({}). Resize is not supported '
                         'during a balance process.').format(pool.name)
                handle_exception(Exception(e_msg), request)
            # _resize_pool_start() add dev mode is quick so no async or tid
            self._resize_pool_start(pool, dnames)
            force = False
            # During dev add we also offer raid level change, if selected
            # blanket apply '-f' to allow for reducing metadata integrity.
            if new_raid != pool.raid:
                force = True
            # Django-ztask initialization as balance is long running.
            tid = self._balance_start(pool, force=force, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks.')
                handle_exception(Exception(e_msg), request)
            detached_disks_selected = 0
            for d in disks:  # to be removed
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk ({}) cannot be removed because it does '
                             'not belong to this '
                             'pool ({}).').format(d.name, pool.name)
                    handle_exception(Exception(e_msg), request)
                if re.match('detached-', d.name) is not None:
                    detached_disks_selected += 1
            if detached_disks_selected >= 3:
                # Artificial constraint but no current btrfs raid level yet
                # allows for > 2 dev detached and we have a mounted vol.
                # Fixed missing space between the concatenated literals
                # ('two' 'detached' previously rendered as "twodetached").
                e_msg = ('We currently only support removing two '
                         'detached disks at a time.')
                handle_exception(Exception(e_msg), request)
            attached_disks_selected = (num_disks_selected -
                                       detached_disks_selected)
            remaining_attached_disks = (pool.disk_set.attached().count() -
                                        attached_disks_selected)
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid ({}) configuration.').format(pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_attached_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid1) '
                         'requires a minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_attached_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid10) '
                         'requires a minimum of 4 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_attached_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid5) requires a '
                         'minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_attached_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid6) requires a '
                         'minimum of 3 disks.')
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.allocated
            # Fixed: the message previously formatted 'usage' (space used)
            # as the "available free space"; report the actual free space,
            # matching the condition being tested.
            available_free = pool.size - usage
            if size_cut >= available_free:
                e_msg = ('Removing disks ({}) may shrink the pool by '
                         '{} KB, which is greater than available free '
                         'space {} KB. This is '
                         'not supported.').format(dnames, size_cut,
                                                  available_free)
                handle_exception(Exception(e_msg), request)
            # Unlike resize_pool_start() with add=True a remove has an
            # implicit balance where the removed disks contents are
            # re-distributed across the remaining pool members.
            # This internal balance cannot currently be monitored by the
            # usual 'btrfs balance status /mnt_pt' command. So we have to
            # use our own mechanism to assess it's status.
            # Django-ztask initialization:
            tid = self._resize_pool_start(pool, dnames, add=False)
            ps = PoolBalance(pool=pool, tid=tid, internal=True)
            ps.save()
            # Setting disk.pool = None for all removed members is redundant
            # as our next disk scan will re-find them until such time as
            # our async task, and it's associated dev remove, has completed
            # it's internal balance. This can take hours.
        else:
            e_msg = 'Command ({}) is not supported.'.format(command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pid, command):
    """Resize a pool.

    :param pid: database id of the target pool.
    :param command: one of:
        'add'     - add a list of disks and hence expand the pool
        'remove'  - remove a list of disks and hence shrink the pool
        'remount' - remount the pool
        'quotas'  - request pool quota setting change
    :return: Response wrapping the serialized, updated pool.
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        except Pool.DoesNotExist:
            # Narrowed from a bare 'except:' - only a missing pool is an
            # expected failure here.
            e_msg = 'Pool with id ({}) does not exist.'.format(pid)
            handle_exception(Exception(e_msg), request)
        if (pool.role == 'root' and command != 'quotas'):
            e_msg = ('Edit operations are not allowed on this pool ({}) '
                     'as it contains the operating '
                     'system.').format(pool.name)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        if (command == 'quotas'):
            return self._quotas(request, pool)
        disks = [
            self._validate_disk(d, request)
            for d in request.data.get('disks', [])
        ]
        num_new_disks = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get('raid_level', pool.raid)
        num_total_disks = (Disk.objects.filter(pool=pool).count() +
                           num_new_disks)
        if (command == 'add'):
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                             'because it belongs to another pool ({})'
                             '.').format(d.name, pool.name, d.pool.name)
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui.').format(d.name)
                    handle_exception(Exception(e_msg), request)
            if pool.raid == 'single' and new_raid == 'raid10':
                # TODO: Consider removing once we have better space calc.
                # Avoid extreme raid level change upwards (space issues).
                e_msg = ('Pool migration from {} to {} is not '
                         'supported.').format(pool.raid, new_raid)
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_disks < 4):
                e_msg = ('A minimum of 4 drives are required for the '
                         'raid level: raid10.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_disks < 3):
                e_msg = ('A minimum of 3 drives are required for the '
                         'raid level: raid6.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_disks < 2):
                e_msg = ('A minimum of 2 drives are required for the '
                         'raid level: raid5.')
                handle_exception(Exception(e_msg), request)
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=r'(started|running|cancelling|pausing|paused)').exists()):  # noqa E501
                e_msg = ('A Balance process is already running or paused '
                         'for this pool ({}). Resize is not supported '
                         'during a balance process.').format(pool.name)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames)
            # During dev add we also offer raid level change, if selected
            # blanket apply '-f' to allow for reducing metadata integrity.
            force = False
            if new_raid != pool.raid:
                force = True
            tid = self._balance_start(pool, force=force, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks.')
                handle_exception(Exception(e_msg), request)
            for d in disks:
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk ({}) cannot be removed because it does '
                             'not belong to this '
                             'pool ({}).').format(d.name, pool.name)
                    handle_exception(Exception(e_msg), request)
            remaining_disks = (Disk.objects.filter(pool=pool).count() -
                               num_new_disks)
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid ({}) configuration.').format(pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid1) '
                         'requires a minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid10) '
                         'requires a minimum of 4 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid5) requires a '
                         'minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid6) requires a '
                         'minimum of 3 disks.')
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.size
            # Fixed: the message previously formatted 'usage' (space used)
            # as the "available free space"; report the actual free space,
            # matching the condition being tested.
            available_free = pool.size - usage
            if size_cut >= available_free:
                e_msg = ('Removing disks ({}) may shrink the pool by '
                         '{} KB, which is greater than available free '
                         'space {} KB. This is '
                         'not supported.').format(dnames, size_cut,
                                                  available_free)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames, add=False)
            tid = self._balance_start(pool)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            for d in disks:
                d.pool = None
                d.save()
        else:
            e_msg = 'Command ({}) is not supported.'.format(command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pname, command): """ resize a pool. @pname: pool's name @command: 'add' - add a list of disks and hence expand the pool 'remove' - remove a list of disks and hence shrink the pool """ with self._handle_exception(request): if (pname == settings.ROOT_POOL): e_msg = ('Edit operations are not allowed on this Pool(%s) ' 'as it contains the operating system.' % pname) handle_exception(Exception(e_msg), request) try: pool = Pool.objects.get(name=pname) except: e_msg = ('Pool(%s) does not exist.' % pname) handle_exception(Exception(e_msg), request) if (command == 'remount'): return self._remount(request, pool) disks = [self._validate_disk(d, request) for d in request.data.get('disks')] num_new_disks = len(disks) if (num_new_disks == 0): e_msg = ('List of disks in the input cannot be empty.') handle_exception(Exception(e_msg), request) dnames = [d.name for d in disks] mount_disk = Disk.objects.filter(pool=pool)[0].name new_raid = request.data.get('raid_level', pool.raid) num_total_disks = (Disk.objects.filter(pool=pool).count() + num_new_disks) usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name)) # free_percent = (usage[2]/usage[0]) * 100 free_percent = (usage[2]* 100)/usage[0] threshold_percent = self.ADD_THRESHOLD * 100 if (command == 'add'): for d in disks: if (d.pool is not None): e_msg = ('Disk(%s) cannot be added to this Pool(%s) ' 'because it belongs to another pool(%s)' % (d.name, pool.name, d.pool.name)) handle_exception(Exception(e_msg), request) if (d.btrfs_uuid is not None): e_msg = ('Disk(%s) has a BTRFS filesystem from the ' 'past. If you really like to add it, wipe it ' 'from the Storage -> Disks screen of the ' 'web-ui' % d.name) handle_exception(Exception(e_msg), request) if (new_raid not in self.SUPPORTED_MIGRATIONS[pool.raid]): e_msg = ('Pool migration from %s to %s is not supported.' 
% (pool.raid, new_raid)) handle_exception(Exception(e_msg), request) if (PoolBalance.objects.filter( pool=pool, status__regex=r'(started|running)').exists()): e_msg = ('A Balance process is already running for this ' 'pool(%s). Resize is not supported during a ' 'balance process.' % pool.name) handle_exception(Exception(e_msg), request) if (free_percent < threshold_percent): e_msg = ('Resize is only supported when there is at least ' '%d percent free space available. But currently ' 'only %d percent is free. Remove some data and ' 'try again.' % (threshold_percent, free_percent)) handle_exception(Exception(e_msg), request) if (new_raid != pool.raid): if (((pool.raid in ('single', 'raid0')) and new_raid in ('raid1', 'raid10'))): cur_num_disks = num_total_disks - num_new_disks if (num_new_disks < cur_num_disks): e_msg = ('For single/raid0 to raid1/raid10 ' 'conversion, at least as many as present ' 'number of disks must be added. %d ' 'disks are provided, but at least %d are ' 'required.' % (num_new_disks, cur_num_disks)) handle_exception(Exception(e_msg), request) resize_pool(pool, mount_disk, dnames) balance_pid = balance_start(pool, mount_disk, convert=new_raid) ps = PoolBalance(pool=pool, pid=balance_pid) ps.save() pool.raid = new_raid for d_o in disks: d_o.pool = pool d_o.save() elif (command == 'remove'): if (new_raid != pool.raid): e_msg = ('Raid configuration cannot be changed while ' 'removing disks') handle_exception(Exception(e_msg), request) for d in disks: if (d.pool is None or d.pool != pool): e_msg = ('Disk(%s) cannot be removed because it does ' 'not belong to this Pool(%s)' % (d.name, pool.name)) handle_exception(Exception(e_msg), request) remaining_disks = (Disk.objects.filter(pool=pool).count() - num_new_disks) if (pool.raid in ('raid0', 'single',)): e_msg = ('Disks cannot be removed from a pool with this ' 'raid(%s) configuration' % pool.raid) handle_exception(Exception(e_msg), request) if (pool.raid in ('raid5', 'raid6',)): e_msg = ('Disk removal 
is not supported for pools with ' 'raid5/6 configuration') handle_exception(Exception(e_msg), request) if (pool.raid == 'raid10'): if (num_new_disks != 2): e_msg = ('Only two disks can be removed at once from ' 'this pool because of its raid ' 'configuration(%s)' % pool.raid) handle_exception(Exception(e_msg), request) elif (remaining_disks < 4): e_msg = ('Disks cannot be removed from this pool ' 'because its raid configuration(%s) ' 'requires a minimum of 4 disks' % pool.raid) handle_exception(Exception(e_msg), request) elif (pool.raid == 'raid1'): if (num_new_disks != 1): e_msg = ('Only one disk can be removed at once from ' 'this pool because of its raid ' 'configuration(%s)' % pool.raid) handle_exception(Exception(e_msg), request) elif (remaining_disks < 2): e_msg = ('Disks cannot be removed from this pool ' 'because its raid configuration(%s) ' 'requires a minimum of 2 disks' % pool.raid) handle_exception(Exception(e_msg), request) threshold_percent = 100 - threshold_percent if (free_percent < threshold_percent): e_msg = ('Removing disks is only supported when there is ' 'at least %d percent free space available. But ' 'currently only %d percent is free. Remove some ' 'data and try again.' % (threshold_percent, free_percent)) handle_exception(Exception(e_msg), request) resize_pool(pool, mount_disk, dnames, add=False) balance_pid = balance_start(pool, mount_disk) ps = PoolBalance(pool=pool, pid=balance_pid) ps.save() for d in disks: d.pool = None d.save() else: e_msg = ('command(%s) is not supported.' % command) handle_exception(Exception(e_msg), request) usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name)) pool.size = usage[0] pool.save() return Response(PoolInfoSerializer(pool).data)
def put(self, request, pid, command): """ resize a pool. @pname: pool's name @command: 'add' - add a list of disks and hence expand the pool 'remove' - remove a list of disks and hence shrink the pool 'remount' - remount the pool, to apply changed mount options 'quotas' - request pool quota setting change """ with self._handle_exception(request): try: pool = Pool.objects.get(id=pid) except: e_msg = 'Pool with id ({}) does not exist.'.format(pid) handle_exception(Exception(e_msg), request) if (pool.role == 'root' and command != 'quotas'): e_msg = ('Edit operations are not allowed on this pool ({}) ' 'as it contains the operating ' 'system.').format(pool.name) handle_exception(Exception(e_msg), request) if (command == 'remount'): return self._remount(request, pool) if (command == 'quotas'): # There is a pending btrfs change that allows for quota state # change on unmounted Volumes (pools). return self._quotas(request, pool) if not pool.is_mounted: e_msg = ('Pool member / raid edits require an active mount. ' 'Please see the "Maintenance required" section.') handle_exception(Exception(e_msg), request) if command == 'remove' and \ request.data.get('disks', []) == ['missing']: disks = [] logger.debug('Remove missing request skipping disk validation') else: disks = [self._validate_disk_id(diskId, request) for diskId in request.data.get('disks', [])] num_disks_selected = len(disks) dnames = self._role_filter_disk_names(disks, request) new_raid = request.data.get('raid_level', pool.raid) if (command == 'add'): # Only attached disks can be selected during an add operation. num_total_attached_disks = pool.disk_set.attached().count() \ + num_disks_selected for d in disks: if (d.pool is not None): e_msg = ('Disk ({}) cannot be added to this pool ({}) ' 'because it belongs to another pool ({})' '.').format(d.name, pool.name, d.pool.name) handle_exception(Exception(e_msg), request) if (d.btrfs_uuid is not None): e_msg = ('Disk ({}) has a BTRFS filesystem from the ' 'past. 
If you really like to add it, wipe it ' 'from the Storage -> Disks screen of the ' 'web-ui.').format(d.name) handle_exception(Exception(e_msg), request) if pool.raid == 'single' and new_raid == 'raid10': # TODO: Consider removing once we have better space calc. # Avoid extreme raid level change upwards (space issues). e_msg = ('Pool migration from {} to {} is not ' 'supported.').format(pool.raid, new_raid) handle_exception(Exception(e_msg), request) if (new_raid == 'raid10' and num_total_attached_disks < 4): e_msg = ('A minimum of 4 drives are required for the ' 'raid level: raid10.') handle_exception(Exception(e_msg), request) if (new_raid == 'raid6' and num_total_attached_disks < 3): e_msg = ('A minimum of 3 drives are required for the ' 'raid level: raid6.') handle_exception(Exception(e_msg), request) if (new_raid == 'raid5' and num_total_attached_disks < 2): e_msg = ('A minimum of 2 drives are required for the ' 'raid level: raid5.') handle_exception(Exception(e_msg), request) if (PoolBalance.objects.filter( pool=pool, status__regex=r'(started|running|cancelling|pausing|paused)').exists()): # noqa E501 e_msg = ('A Balance process is already running or paused ' 'for this pool ({}). Resize is not supported ' 'during a balance process.').format(pool.name) handle_exception(Exception(e_msg), request) # TODO: run resize_pool() as async task like start_balance() resize_pool(pool, dnames) # None if no action force = False # During dev add we also offer raid level change, if selected # blanket apply '-f' to allow for reducing metadata integrity. 
if new_raid != pool.raid: force = True tid = self._balance_start(pool, force=force, convert=new_raid) ps = PoolBalance(pool=pool, tid=tid) ps.save() pool.raid = new_raid for d_o in disks: d_o.pool = pool d_o.save() # Now we ensure udev info is updated via system wide trigger trigger_udev_update() elif (command == 'remove'): if (new_raid != pool.raid): e_msg = ('Raid configuration cannot be changed while ' 'removing disks.') handle_exception(Exception(e_msg), request) detached_disks_selected = 0 for d in disks: # to be removed if (d.pool is None or d.pool != pool): e_msg = ('Disk ({}) cannot be removed because it does ' 'not belong to this ' 'pool ({}).').format(d.name, pool.name) handle_exception(Exception(e_msg), request) if re.match('detached-', d.name) is not None: detached_disks_selected += 1 if detached_disks_selected >= 3: # Artificial constraint but no current btrfs raid level yet # allows for > 2 dev detached and we have a mounted vol. e_msg = ('We currently only support removing two' 'detached disks at a time.') handle_exception(Exception(e_msg), request) attached_disks_selected = ( num_disks_selected - detached_disks_selected) remaining_attached_disks = ( pool.disk_set.attached().count() - attached_disks_selected) if (pool.raid == 'raid0'): e_msg = ('Disks cannot be removed from a pool with this ' 'raid ({}) configuration.').format(pool.raid) handle_exception(Exception(e_msg), request) if (pool.raid == 'raid1' and remaining_attached_disks < 2): e_msg = ('Disks cannot be removed from this pool ' 'because its raid configuration (raid1) ' 'requires a minimum of 2 disks.') handle_exception(Exception(e_msg), request) if (pool.raid == 'raid10' and remaining_attached_disks < 4): e_msg = ('Disks cannot be removed from this pool ' 'because its raid configuration (raid10) ' 'requires a minimum of 4 disks.') handle_exception(Exception(e_msg), request) if (pool.raid == 'raid5' and remaining_attached_disks < 2): e_msg = ('Disks cannot be removed from this pool because 
' 'its raid configuration (raid5) requires a ' 'minimum of 2 disks.') handle_exception(Exception(e_msg), request) if (pool.raid == 'raid6' and remaining_attached_disks < 3): e_msg = ('Disks cannot be removed from this pool because ' 'its raid configuration (raid6) requires a ' 'minimum of 3 disks.') handle_exception(Exception(e_msg), request) usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name)) size_cut = 0 for d in disks: size_cut += d.size if size_cut >= (pool.size - usage): e_msg = ('Removing disks ({}) may shrink the pool by ' '{} KB, which is greater than available free ' 'space {} KB. This is ' 'not supported.').format(dnames, size_cut, usage) handle_exception(Exception(e_msg), request) # TODO: run resize_pool() as async task like start_balance(), # particularly important on device delete as it initiates an # internal volume balance which cannot be monitored by: # btrfs balance status. # See https://github.com/rockstor/rockstor-core/issues/1722 # Hence we need also to add a 'DIY' status / percentage # reporting method. resize_pool(pool, dnames, add=False) # None if no action # Unlike resize_pool() with add=True a delete has an implicit # balance where the deleted disks contents are re-distributed # across the remaining disks. for d in disks: d.pool = None d.save() else: e_msg = 'Command ({}) is not supported.'.format(command) handle_exception(Exception(e_msg), request) pool.size = pool.usage_bound() pool.save() return Response(PoolInfoSerializer(pool).data)
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            # The root pool hosts the OS and must never be resized.
            if pname == settings.ROOT_POOL:
                e_msg = (
                    "Edit operations are not allowed on this Pool(%s) "
                    "as it contains the operating system." % pname
                )
                handle_exception(Exception(e_msg), request)
            try:
                pool = Pool.objects.get(name=pname)
            # NOTE(review): bare except masks any DB error as "does not
            # exist"; consider narrowing to Pool.DoesNotExist.
            except:
                e_msg = "Pool(%s) does not exist." % pname
                handle_exception(Exception(e_msg), request)
            if command == "remount":
                return self._remount(request, pool)
            disks = [self._validate_disk(d, request) for d in request.data.get("disks")]
            num_new_disks = len(disks)
            if num_new_disks == 0:
                e_msg = "List of disks in the input cannot be empty."
                handle_exception(Exception(e_msg), request)
            dnames = [d.name for d in disks]
            # Any current member disk addresses the mounted pool for
            # resize/balance.
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            new_raid = request.data.get("raid_level", pool.raid)
            num_total_disks = Disk.objects.filter(pool=pool).count() + num_new_disks
            usage = pool_usage("/%s/%s" % (settings.MNT_PT, pool.name))
            # free_percent = (usage[2]/usage[0]) * 100
            free_percent = (usage[2] * 100) / usage[0]
            threshold_percent = self.ADD_THRESHOLD * 100
            if command == "add":
                # Each candidate disk must be unowned and wiped of any old
                # btrfs.
                for d in disks:
                    if d.pool is not None:
                        e_msg = (
                            "Disk(%s) cannot be added to this Pool(%s) "
                            "because it belongs to another pool(%s)"
                            % (d.name, pool.name, d.pool.name)
                        )
                        handle_exception(Exception(e_msg), request)
                    if d.btrfs_uuid is not None:
                        e_msg = (
                            "Disk(%s) has a BTRFS filesystem from the "
                            "past. If you really like to add it, wipe it "
                            "from the Storage -> Disks screen of the "
                            "web-ui" % d.name
                        )
                        handle_exception(Exception(e_msg), request)
                if new_raid not in self.SUPPORTED_MIGRATIONS[pool.raid]:
                    e_msg = "Pool migration from %s to %s is not supported." % (
                        pool.raid,
                        new_raid,
                    )
                    handle_exception(Exception(e_msg), request)
                # Resize during an active balance is unsupported.
                if PoolBalance.objects.filter(
                    pool=pool, status__regex=r"(started|running)"
                ).exists():
                    e_msg = (
                        "A Balance process is already running for this "
                        "pool(%s). Resize is not supported during a "
                        "balance process." % pool.name
                    )
                    handle_exception(Exception(e_msg), request)
                if free_percent < threshold_percent:
                    e_msg = (
                        "Resize is only supported when there is at least "
                        "%d percent free space available. But currently "
                        "only %d percent is free. Remove some data and "
                        "try again." % (threshold_percent, free_percent)
                    )
                    handle_exception(Exception(e_msg), request)
                if new_raid != pool.raid:
                    # Mirrored conversion needs at least a doubling of the
                    # device count.
                    if (pool.raid in ("single", "raid0")) and new_raid in ("raid1", "raid10"):
                        cur_num_disks = num_total_disks - num_new_disks
                        if num_new_disks < cur_num_disks:
                            e_msg = (
                                "For single/raid0 to raid1/raid10 "
                                "conversion, at least as many as present "
                                "number of disks must be added. %d "
                                "disks are provided, but at least %d are "
                                "required." % (num_new_disks, cur_num_disks)
                            )
                            handle_exception(Exception(e_msg), request)
                resize_pool(pool, mount_disk, dnames)
                balance_pid = balance_start(pool, mount_disk, convert=new_raid)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()
                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
            elif command == "remove":
                if new_raid != pool.raid:
                    e_msg = "Raid configuration cannot be changed while " "removing disks"
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if d.pool is None or d.pool != pool:
                        e_msg = "Disk(%s) cannot be removed because it does " "not belong to this Pool(%s)" % (
                            d.name,
                            pool.name,
                        )
                        handle_exception(Exception(e_msg), request)
                remaining_disks = Disk.objects.filter(pool=pool).count() - num_new_disks
                # Per-raid-level minimum device constraints follow.
                if pool.raid in ("raid0", "single"):
                    e_msg = "Disks cannot be removed from a pool with this " "raid(%s) configuration" % pool.raid
                    handle_exception(Exception(e_msg), request)
                if pool.raid in ("raid5", "raid6"):
                    e_msg = "Disk removal is not supported for pools with " "raid5/6 configuration"
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid10":
                    if num_new_disks != 2:
                        e_msg = (
                            "Only two disks can be removed at once from "
                            "this pool because of its raid "
                            "configuration(%s)" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)
                    elif remaining_disks < 4:
                        e_msg = (
                            "Disks cannot be removed from this pool "
                            "because its raid configuration(%s) "
                            "requires a minimum of 4 disks" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)
                elif pool.raid == "raid1":
                    if num_new_disks != 1:
                        e_msg = (
                            "Only one disk can be removed at once from "
                            "this pool because of its raid "
                            "configuration(%s)" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)
                    elif remaining_disks < 2:
                        e_msg = (
                            "Disks cannot be removed from this pool "
                            "because its raid configuration(%s) "
                            "requires a minimum of 2 disks" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)
                # Removal tightens the free-space requirement: invert the add
                # threshold.
                threshold_percent = 100 - threshold_percent
                if free_percent < threshold_percent:
                    e_msg = (
                        "Removing disks is only supported when there is "
                        "at least %d percent free space available. But "
                        "currently only %d percent is free. Remove some "
                        "data and try again." % (threshold_percent, free_percent)
                    )
                    handle_exception(Exception(e_msg), request)
                resize_pool(pool, mount_disk, dnames, add=False)
                balance_pid = balance_start(pool, mount_disk)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()
                for d in disks:
                    d.pool = None
                    d.save()
            else:
                e_msg = "command(%s) is not supported." % command
                handle_exception(Exception(e_msg), request)
            # Refresh the recorded pool size from the live filesystem.
            usage = pool_usage("/%s/%s" % (settings.MNT_PT, pool.name))
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            # The root pool hosts the OS and must never be resized.
            if (pname == settings.ROOT_POOL):
                e_msg = ('Edit operations are not allowed on this Pool(%s) '
                         'as it contains the operating system.' % pname)
                handle_exception(Exception(e_msg), request)
            try:
                pool = Pool.objects.get(name=pname)
            # NOTE(review): bare except masks any DB error as "does not
            # exist"; consider narrowing to Pool.DoesNotExist.
            except:
                e_msg = ('Pool(%s) does not exist.' % pname)
                handle_exception(Exception(e_msg), request)
            if (command == 'remount'):
                return self._remount(request, pool)
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            num_new_disks = len(disks)
            if (num_new_disks == 0):
                e_msg = ('List of disks in the input cannot be empty.')
                handle_exception(Exception(e_msg), request)
            dnames = [d.name for d in disks]
            # Any current member disk addresses the mounted pool for
            # resize/balance.
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            # free_percent = (usage[2]/usage[0]) * 100
            free_percent = (usage[2] * 100) / usage[0]
            threshold_percent = self.ADD_THRESHOLD * 100
            if (command == 'add'):
                # Each candidate disk must be unowned and wiped of any old
                # btrfs.
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                                 'because it belongs to another pool(%s)' %
                                 (d.name, pool.name, d.pool.name))
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui' % d.name)
                        handle_exception(Exception(e_msg), request)
                if (new_raid not in self.SUPPORTED_MIGRATIONS[pool.raid]):
                    e_msg = ('Pool migration from %s to %s is not supported.'
                             % (pool.raid, new_raid))
                    handle_exception(Exception(e_msg), request)
                # Resize during an active balance is unsupported.
                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=r'(started|running)').exists()):
                    e_msg = ('A Balance process is already running for this '
                             'pool(%s). Resize is not supported during a '
                             'balance process.' % pool.name)
                    handle_exception(Exception(e_msg), request)
                if (free_percent < threshold_percent):
                    e_msg = ('Resize is only supported when there is at least '
                             '%d percent free space available. But currently '
                             'only %d percent is free. Remove some data and '
                             'try again.' % (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)
                if (new_raid != pool.raid):
                    # Mirrored conversion needs at least a doubling of the
                    # device count.
                    if (((pool.raid in ('single', 'raid0')) and
                         new_raid in ('raid1', 'raid10'))):
                        cur_num_disks = num_total_disks - num_new_disks
                        if (num_new_disks < cur_num_disks):
                            e_msg = ('For single/raid0 to raid1/raid10 '
                                     'conversion, at least as many as present '
                                     'number of disks must be added. %d '
                                     'disks are provided, but at least %d are '
                                     'required.' %
                                     (num_new_disks, cur_num_disks))
                            handle_exception(Exception(e_msg), request)
                resize_pool(pool, mount_disk, dnames)
                balance_pid = balance_start(pool, mount_disk,
                                            convert=new_raid)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()
                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk(%s) cannot be removed because it does '
                                 'not belong to this Pool(%s)' %
                                 (d.name, pool.name))
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                # Per-raid-level minimum device constraints follow.
                if (pool.raid in (
                        'raid0',
                        'single',
                )):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid(%s) configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)
                if (pool.raid in (
                        'raid5',
                        'raid6',
                )):
                    e_msg = ('Disk removal is not supported for pools with '
                             'raid5/6 configuration')
                    handle_exception(Exception(e_msg), request)
                if (pool.raid == 'raid10'):
                    if (num_new_disks != 2):
                        e_msg = ('Only two disks can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 4):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 4 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                elif (pool.raid == 'raid1'):
                    if (num_new_disks != 1):
                        e_msg = ('Only one disk can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 2):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 2 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                # Removal tightens the free-space requirement: invert the add
                # threshold.
                threshold_percent = 100 - threshold_percent
                if (free_percent < threshold_percent):
                    e_msg = ('Removing disks is only supported when there is '
                             'at least %d percent free space available. But '
                             'currently only %d percent is free. Remove some '
                             'data and try again.' %
                             (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)
                resize_pool(pool, mount_disk, dnames, add=False)
                balance_pid = balance_start(pool, mount_disk)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()
                for d in disks:
                    d.pool = None
                    d.save()
            else:
                e_msg = ('command(%s) is not supported.' % command)
                handle_exception(Exception(e_msg), request)
            # Refresh the recorded pool size from the live filesystem.
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)