def post(self, request):
    """
    Create a new btrfs pool.

    Input (request.data) is a list of disks, raid_level and name of the pool.
    Validates the name (regex, length, uniqueness vs pools and shares), the
    member disks (must not carry a prior btrfs filesystem), and the disk
    count for the requested raid level; then creates the Pool db entry,
    associates the disks, creates the on-disk pool via add_pool(), records
    size/uuid, and triggers a udev update.

    Returns a serialized PoolInfo response; all validation failures are
    routed through handle_exception().
    """
    with self._handle_exception(request):
        disks = [
            self._validate_disk(d, request) for d in request.data.get("disks")
        ]
        pname = request.data["pname"]
        # Pool name must fully match POOL_REGEX (anchored with trailing $).
        if re.match("{}$".format(settings.POOL_REGEX), pname) is None:
            e_msg = ("Invalid characters in pool name. Following "
                     "characters are allowed: letter(a-z or A-Z), "
                     "digit(0-9), "
                     "hyphen(-), underscore(_) or a period(.).")
            handle_exception(Exception(e_msg), request)
        if len(pname) > 255:
            e_msg = "Pool name must be less than 255 characters."
            handle_exception(Exception(e_msg), request)
        if Pool.objects.filter(name=pname).exists():
            e_msg = ("Pool ({}) already exists. Choose a different name."
                     ).format(pname)
            handle_exception(Exception(e_msg), request)
        # Pool and share names share a namespace (mount points), so a share
        # with the same name is also rejected.
        if Share.objects.filter(name=pname).exists():
            e_msg = ("A share with this name ({}) exists. Pool and share "
                     "names must be distinct. "
                     "Choose a different name.").format(pname)
            handle_exception(Exception(e_msg), request)
        for d in disks:
            # A non-None btrfs_uuid indicates a prior filesystem on the disk.
            if d.btrfs_uuid is not None:
                e_msg = ("Another BTRFS filesystem exists on this "
                         "disk ({}). "
                         "Erase the disk and try again.").format(d.name)
                handle_exception(Exception(e_msg), request)
        raid_level = request.data["raid_level"]
        if raid_level not in self.RAID_LEVELS:
            e_msg = ("Unsupported raid level. Use one of: {}.").format(
                self.RAID_LEVELS)
            handle_exception(Exception(e_msg), request)
        # consolidated raid0 & raid 1 disk check
        # NOTE(review): minimum-disk checks index into self.RAID_LEVELS;
        # assumes ordering [single, raid0, raid1, raid10, raid5, raid6] —
        # TODO confirm against the class attribute definition.
        if raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1:
            e_msg = (
                "At least 2 disks are required for the raid level: {}."
            ).format(raid_level)
            handle_exception(Exception(e_msg), request)
        if raid_level == self.RAID_LEVELS[3]:
            if len(disks) < 4:
                e_msg = (
                    "A minimum of 4 drives are required for the raid level: {}."
                ).format(raid_level)
                handle_exception(Exception(e_msg), request)
        if raid_level == self.RAID_LEVELS[4] and len(disks) < 2:
            e_msg = ("2 or more disks are required for the raid level: {}."
                     ).format(raid_level)
            handle_exception(Exception(e_msg), request)
        if raid_level == self.RAID_LEVELS[5] and len(disks) < 3:
            e_msg = ("3 or more disks are required for the raid level: {}."
                     ).format(raid_level)
            handle_exception(Exception(e_msg), request)
        compression = self._validate_compression(request)
        mnt_options = self._validate_mnt_options(request)
        dnames = self._role_filter_disk_names(disks, request)
        p = Pool(
            name=pname,
            raid=raid_level,
            compression=compression,
            mnt_options=mnt_options,
        )
        p.save()
        p.disk_set.add(*disks)
        # added for loop to save disks appears p.disk_set.add(*disks) was
        # not saving disks in test environment
        for d in disks:
            d.pool = p
            d.save()
        # Create the on-disk pool, then record its usage-bound size and the
        # btrfs filesystem uuid (read from the first member device).
        add_pool(p, dnames)
        p.size = p.usage_bound()
        p.uuid = btrfs_uuid(dnames[0])
        p.save()
        # Now we ensure udev info is updated via system wide trigger
        # as per pool resize add, only here it is for a new pool.
        trigger_udev_update()
        return Response(PoolInfoSerializer(p).data)
def put(self, request, pid, command):
    """
    resize a pool.
    @pname: pool's name
    @command: 'add' - add a list of disks and hence expand the pool
              'remove' - remove a list of disks and hence shrink the pool
              'remount' - remount the pool, to apply changed mount options
              'quotas' - request pool quota setting change

    Returns a serialized PoolInfo response (or the _remount/_quotas
    sub-handler response); validation failures are routed through
    handle_exception().
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        # NOTE(review): bare except also masks errors other than
        # Pool.DoesNotExist (e.g. an invalid pid type).
        except:
            e_msg = "Pool with id ({}) does not exist.".format(pid)
            handle_exception(Exception(e_msg), request)
        # The OS (root) pool only allows the 'quotas' command.
        if pool.role == "root" and command != "quotas":
            e_msg = ("Edit operations are not allowed on this pool ({}) "
                     "as it contains the operating "
                     "system.").format(pool.name)
            handle_exception(Exception(e_msg), request)
        if command == "remount":
            return self._remount(request, pool)
        if command == "quotas":
            # There is a pending btrfs change that allows for quota state
            # change on unmounted Volumes (pools).
            return self._quotas(request, pool)
        # Establish missing and detached disk removal request flag defaults:
        remove_missing_disk_request = False
        all_members_detached = False
        # A remove request with disks == ["missing"] maps to
        # "btrfs device delete missing" semantics (no disk validation).
        if command == "remove" and request.data.get("disks", []) == ["missing"]:
            remove_missing_disk_request = True
        # All members detached: every disk name carries the "detached-" prefix.
        if (pool.disk_set.filter(name__startswith="detached-").count() ==
                pool.disk_set.count()):
            all_members_detached = True
        if not pool.is_mounted:
            # If we are asked to remove the last disk in a pool and it's detached
            # then user has already been notified to not remove it if it's to be
            # re-attached. So skip our mount exception as not possible anyway unless
            # re-attached and we have already indicated that possible path.
            # All works accounts for all pool members in detached state.
            if all_members_detached:
                logger.info(
                    "Skipping mount requirement: all pool's member are detached."
                )
            else:
                e_msg = (
                    "Pool member / raid edits require an active mount. "
                    'Please see the "Maintenance required" section.')
                handle_exception(Exception(e_msg), request)
        if remove_missing_disk_request:
            disks = []
            logger.debug(
                "Remove missing request, so skipping disk validation")
        else:
            disks = [
                self._validate_disk_id(diskId, request)
                for diskId in request.data.get("disks", [])
            ]
        num_disks_selected = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get("raid_level", pool.raid)
        if command == "add":
            # Only attached disks can be selected during an add operation.
            num_total_attached_disks = (pool.disk_set.attached().count() +
                                        num_disks_selected)
            for d in disks:
                if d.pool is not None:
                    e_msg = ("Disk ({}) cannot be added to this pool ({}) "
                             "because it belongs to another pool ({})"
                             ".").format(d.name, pool.name, d.pool.name)
                    handle_exception(Exception(e_msg), request)
                if d.btrfs_uuid is not None:
                    e_msg = ("Disk ({}) has a BTRFS filesystem from the "
                             "past. If you really like to add it, wipe it "
                             "from the Storage -> Disks screen of the "
                             "web-ui.").format(d.name)
                    handle_exception(Exception(e_msg), request)
            if pool.raid == "single" and new_raid == "raid10":
                # TODO: Consider removing once we have better space calc.
                # Avoid extreme raid level change upwards (space issues).
                e_msg = ("Pool migration from {} to {} is not supported."
                         ).format(pool.raid, new_raid)
                handle_exception(Exception(e_msg), request)
            # Minimum attached member counts per target raid level.
            if new_raid == "raid10" and num_total_attached_disks < 4:
                e_msg = ("A minimum of 4 drives are required for the "
                         "raid level: raid10.")
                handle_exception(Exception(e_msg), request)
            if new_raid == "raid6" and num_total_attached_disks < 3:
                e_msg = ("A minimum of 3 drives are required for the "
                         "raid level: raid6.")
                handle_exception(Exception(e_msg), request)
            if new_raid == "raid5" and num_total_attached_disks < 2:
                e_msg = ("A minimum of 2 drives are required for the "
                         "raid level: raid5.")
                handle_exception(Exception(e_msg), request)
            # Refuse a resize while any balance is active/paused on this pool.
            if PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=
                    r"(started|running|cancelling|pausing|paused)",
            ).exists():  # noqa E501
                e_msg = ("A Balance process is already running or paused "
                         "for this pool ({}). Resize is not supported "
                         "during a balance process.").format(pool.name)
                handle_exception(Exception(e_msg), request)
            # _resize_pool_start() add dev mode is quick so no async or tid
            self._resize_pool_start(pool, dnames)
            force = False
            # During dev add we also offer raid level change, if selected
            # blanket apply '-f' to allow for reducing metadata integrity.
            if new_raid != pool.raid:
                force = True
            # Django-ztask initialization as balance is long running.
            tid = self._balance_start(pool, force=force, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif command == "remove":
            if new_raid != pool.raid:
                e_msg = "Raid configuration cannot be changed while removing disks."
                handle_exception(Exception(e_msg), request)
            detached_disks_selected = 0
            for d in disks:  # to be removed
                if d.pool is None or d.pool != pool:
                    e_msg = ("Disk ({}) cannot be removed because it does "
                             "not belong to this "
                             "pool ({}).").format(d.name, pool.name)
                    handle_exception(Exception(e_msg), request)
                if re.match("detached-", d.name) is not None:
                    detached_disks_selected += 1
            if detached_disks_selected >= 2:
                # We translate the removal of a detached device into:
                # "btrfs device delete missing mnt_pt"
                # but only when appropriate, this removes the first 'missing' dev.
                # A detached disk is not necessarily missing, but an indication of
                # prior pool association.
                e_msg = (
                    "Detached disk selection is limited to a single device. "
                    "If all Pool members are detached all will be removed "
                    "and their pool automatically deleted there after.")
                handle_exception(Exception(e_msg), request)
            attached_disks_selected = (num_disks_selected -
                                       detached_disks_selected)
            remaining_attached_disks = (pool.disk_set.attached().count() -
                                        attached_disks_selected)
            # Add check for attempt to remove detached & attached disks concurrently
            if detached_disks_selected > 0 and attached_disks_selected > 0:
                e_msg = (
                    "Mixed detached and attached disk selection is "
                    "not supported. Limit your selection to only attached "
                    "disks, or a single detached disk.")
                handle_exception(Exception(e_msg), request)
            # Skip all further sanity checks when all members are detached.
            if not all_members_detached:
                if pool.raid == "raid0":
                    e_msg = (
                        "Disks cannot be removed from a pool with this "
                        "raid ({}) configuration.").format(pool.raid)
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid1" and remaining_attached_disks < 2:
                    e_msg = ("Disks cannot be removed from this pool "
                             "because its raid configuration (raid1) "
                             "requires a minimum of 2 disks.")
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid10" and remaining_attached_disks < 4:
                    e_msg = ("Disks cannot be removed from this pool "
                             "because its raid configuration (raid10) "
                             "requires a minimum of 4 disks.")
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid5" and remaining_attached_disks < 2:
                    e_msg = (
                        "Disks cannot be removed from this pool because "
                        "its raid configuration (raid5) requires a "
                        "minimum of 2 disks.")
                    handle_exception(Exception(e_msg), request)
                if pool.raid == "raid6" and remaining_attached_disks < 3:
                    e_msg = (
                        "Disks cannot be removed from this pool because "
                        "its raid configuration (raid6) requires a "
                        "minimum of 3 disks.")
                    handle_exception(Exception(e_msg), request)
                # Refuse the shrink when the sum of the removed disks'
                # allocated bytes meets/exceeds the pool's free space.
                usage = pool_usage("/{}/{}".format(settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:  # to be removed
                    size_cut += d.allocated
                available_free = pool.size - usage
                if size_cut >= available_free:
                    e_msg = ("Removing disks ({}) may shrink the pool by "
                             "{} KB, which is greater than available free "
                             "space {} KB. This is "
                             "not supported.").format(
                                 dnames, size_cut, available_free)
                    handle_exception(Exception(e_msg), request)
                # Unlike resize_pool_start() with add=True a remove has an
                # implicit balance where the removed disks contents are
                # re-distributed across the remaining pool members.
                # This internal balance cannot currently be monitored by the
                # usual 'btrfs balance status /mnt_pt' command. So we have to
                # use our own mechanism to assess it's status.
                # Django-ztask initialization:
                tid = self._resize_pool_start(pool, dnames, add=False)
                ps = PoolBalance(pool=pool, tid=tid, internal=True)
                ps.save()
                # Setting disk.pool = None for all removed members is redundant
                # as our next disk scan will re-find them until such time as
                # our async task, and it's associated dev remove, has completed
                # it's internal balance. This can take hours. Except for db only
                # event of all_members_detached.
            else:  # all_members_detached:
                # If all members are detached then delete pool associations for all.
                # We cannot mount and so cannot perform any resize or any further
                # pool member validation anyway.
                # N.B. on next pool refresh, no members leads to pool removal.
                for d in pool.disk_set.all():
                    d.pool = None
                    d.save()
        else:
            e_msg = "Command ({}) is not supported.".format(command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def post(self, request):
    """
    Create a new btrfs pool (legacy %-formatting variant).

    input is a list of disks, raid_level and name of the pool.
    Validation mirrors the newer post(): name regex/length/uniqueness,
    disks free of prior btrfs filesystems, and per-raid-level minimum
    disk counts; then the Pool db entry and on-disk pool are created.
    """
    with self._handle_exception(request):
        disks = [
            self._validate_disk(d, request)
            for d in request.data.get('disks')
        ]
        pname = request.data['pname']
        # Anchored full match of the configured pool-name regex.
        if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
            e_msg = ('Invalid characters in Pool name. Following '
                     'characters are allowed: letter(a-z or A-Z), '
                     'digit(0-9), '
                     'hyphen(-), underscore(_) or a period(.).')
            handle_exception(Exception(e_msg), request)
        if (len(pname) > 255):
            e_msg = ('Pool name must be less than 255 characters')
            handle_exception(Exception(e_msg), request)
        if (Pool.objects.filter(name=pname).exists()):
            e_msg = ('Pool(%s) already exists. Choose a different name' %
                     pname)
            handle_exception(Exception(e_msg), request)
        # Pool and share names must be distinct (shared namespace).
        if (Share.objects.filter(name=pname).exists()):
            e_msg = ('A Share with this name(%s) exists. Pool and Share '
                     'names '
                     'must be distinct. Choose a different name' % pname)
            handle_exception(Exception(e_msg), request)
        for d in disks:
            # Reject disks that carry a prior btrfs filesystem.
            if (d.btrfs_uuid is not None):
                e_msg = ('Another BTRFS filesystem exists on this '
                         'disk(%s). Erase the disk and try again.' %
                         d.name)
                handle_exception(Exception(e_msg), request)
        raid_level = request.data['raid_level']
        if (raid_level not in self.RAID_LEVELS):
            e_msg = ('Unsupported raid level. use one of: {}'.format(
                self.RAID_LEVELS))
            handle_exception(Exception(e_msg), request)
        # consolidated raid0 & raid 1 disk check
        # NOTE(review): index-based raid level checks; assumes RAID_LEVELS
        # ordering [single, raid0, raid1, raid10, raid5, raid6] — TODO
        # confirm against the class attribute definition.
        if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
            e_msg = ('At least two disks are required for the raid level: '
                     '%s' % raid_level)
            handle_exception(Exception(e_msg), request)
        if (raid_level == self.RAID_LEVELS[3]):
            if (len(disks) < 4):
                e_msg = ('A minimum of Four drives are required for the '
                         'raid level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
        if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
            e_msg = ('Two or more disks are required for the raid '
                     'level: %s' % raid_level)
            handle_exception(Exception(e_msg), request)
        if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
            e_msg = ('Three or more disks are required for the raid '
                     'level: %s' % raid_level)
            handle_exception(Exception(e_msg), request)
        compression = self._validate_compression(request)
        mnt_options = self._validate_mnt_options(request)
        dnames = self._role_filter_disk_names(disks, request)
        p = Pool(name=pname, raid=raid_level, compression=compression,
                 mnt_options=mnt_options)
        p.save()
        p.disk_set.add(*disks)
        # added for loop to save disks appears p.disk_set.add(*disks) was
        # not saving disks in test environment
        for d in disks:
            d.pool = p
            d.save()
        # Create the on-disk pool, then record size and filesystem uuid.
        add_pool(p, dnames)
        p.size = p.usage_bound()
        p.uuid = btrfs_uuid(dnames[0])
        p.save()
        # Now we ensure udev info is updated via system wide trigger
        # as per pool resize add, only here it is for a new pool.
        trigger_udev_update()
        return Response(PoolInfoSerializer(p).data)
def put(self, request, pid, command):
    """
    resize a pool.
    @pname: pool's name
    @command: 'add' - add a list of disks and hence expand the pool
              'remove' - remove a list of disks and hence shrink the pool

    Legacy variant: also honours 'remount'; synchronous resize_pool()
    followed by an explicit balance task. Validation failures are routed
    through handle_exception().
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        # NOTE(review): bare except masks errors other than
        # Pool.DoesNotExist.
        except:
            # NOTE(review): '%d' assumes pid is an int — URL kwargs are
            # often strings; TODO confirm pid type at the URLconf.
            e_msg = ('Pool(%d) does not exist.' % pid)
            handle_exception(Exception(e_msg), request)
        if (pool.role == 'root'):
            e_msg = ('Edit operations are not allowed on this Pool(%d) '
                     'as it contains the operating system.' % pid)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        disks = [
            self._validate_disk(d, request)
            for d in request.data.get('disks', [])
        ]
        num_new_disks = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get('raid_level', pool.raid)
        # Prospective member count if the selected disks were added.
        num_total_disks = (Disk.objects.filter(pool=pool).count() +
                           num_new_disks)
        if (command == 'add'):
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                             'because it belongs to another pool(%s)' %
                             (d.name, pool.name, d.pool.name))
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui' % d.name)
                    handle_exception(Exception(e_msg), request)
            # Disallow downgrading any redundant raid level to 'single'.
            if (pool.raid != 'single' and new_raid == 'single'):
                e_msg = ('Pool migration from %s to %s is not supported.'
                         % (pool.raid, new_raid))
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_disks < 4):
                e_msg = ('A minimum of Four drives are required for the '
                         'raid level: raid10')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_disks < 3):
                e_msg = ('A minimum of Three drives are required for the '
                         'raid level: raid6')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_disks < 2):
                e_msg = ('A minimum of Two drives are required for the '
                         'raid level: raid5')
                handle_exception(Exception(e_msg), request)
            # Refuse a resize while any balance is active/paused.
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=
                    r'(started|running|cancelling|pausing|paused)').exists(
                    )):  # noqa E501
                e_msg = ('A Balance process is already running or paused '
                         'for this pool(%s). Resize is not supported '
                         'during a balance process.' % pool.name)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames)
            tid = self._balance_start(pool, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks')
                handle_exception(Exception(e_msg), request)
            for d in disks:
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk(%s) cannot be removed because it does '
                             'not belong to this Pool(%s)' %
                             (d.name, pool.name))
                    handle_exception(Exception(e_msg), request)
            # Member count remaining after the selected removals.
            remaining_disks = (Disk.objects.filter(pool=pool).count() -
                               num_new_disks)
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid(%s) configuration' % pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid1) '
                         'requires a minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid10) '
                         'requires a minimum of 4 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid5) requires a '
                         'minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid6) requires a '
                         'minimum of 3 disks')
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.size
            # NOTE(review): the comparison uses pool.size - usage as free
            # space, but the message below reports 'usage' as "available
            # free space" — the values disagree; newer variants report
            # pool.size - usage here.
            if size_cut >= (pool.size - usage):
                e_msg = ('Removing these(%s) disks may shrink the pool by '
                         '%dKB, which is greater than available free space'
                         ' %dKB. This is not supported.' %
                         (dnames, size_cut, usage))
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames, add=False)
            tid = self._balance_start(pool)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            for d in disks:
                d.pool = None
                d.save()
        else:
            e_msg = ('command(%s) is not supported.' % command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pid, command):
    """
    resize a pool.
    @pname: pool's name
    @command: 'add' - add a list of disks and hence expand the pool
              'remove' - remove a list of disks and hence shrink the pool
              'remount' - remount the pool, to apply changed mount options
              'quotas' - request pool quota setting change

    Intermediate variant: supports up to two detached-disk removals and
    requires an active mount for member/raid edits.
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        # NOTE(review): bare except masks errors other than
        # Pool.DoesNotExist.
        except:
            e_msg = 'Pool with id ({}) does not exist.'.format(pid)
            handle_exception(Exception(e_msg), request)
        # The OS (root) pool only allows the 'quotas' command.
        if (pool.role == 'root' and command != 'quotas'):
            e_msg = ('Edit operations are not allowed on this pool ({}) '
                     'as it contains the operating '
                     'system.').format(pool.name)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        if (command == 'quotas'):
            # There is a pending btrfs change that allows for quota state
            # change on unmounted Volumes (pools).
            return self._quotas(request, pool)
        if not pool.is_mounted:
            e_msg = ('Pool member / raid edits require an active mount. '
                     'Please see the "Maintenance required" section.')
            handle_exception(Exception(e_msg), request)
        # 'remove' with ["missing"] maps to btrfs "device delete missing";
        # no per-disk validation in that case.
        if command == 'remove' and \
                request.data.get('disks', []) == ['missing']:
            disks = []
            logger.debug('Remove missing request skipping disk validation')
        else:
            disks = [
                self._validate_disk_id(diskId, request)
                for diskId in request.data.get('disks', [])
            ]
        num_disks_selected = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get('raid_level', pool.raid)
        if (command == 'add'):
            # Only attached disks can be selected during an add operation.
            num_total_attached_disks = pool.disk_set.attached().count() \
                + num_disks_selected
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                             'because it belongs to another pool ({})'
                             '.').format(d.name, pool.name, d.pool.name)
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui.').format(d.name)
                    handle_exception(Exception(e_msg), request)
            if pool.raid == 'single' and new_raid == 'raid10':
                # TODO: Consider removing once we have better space calc.
                # Avoid extreme raid level change upwards (space issues).
                e_msg = ('Pool migration from {} to {} is not '
                         'supported.').format(pool.raid, new_raid)
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_attached_disks < 4):
                e_msg = ('A minimum of 4 drives are required for the '
                         'raid level: raid10.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_attached_disks < 3):
                e_msg = ('A minimum of 3 drives are required for the '
                         'raid level: raid6.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_attached_disks < 2):
                e_msg = ('A minimum of 2 drives are required for the '
                         'raid level: raid5.')
                handle_exception(Exception(e_msg), request)
            # Refuse a resize while any balance is active/paused.
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=
                    r'(started|running|cancelling|pausing|paused)').exists(
                    )):  # noqa E501
                e_msg = ('A Balance process is already running or paused '
                         'for this pool ({}). Resize is not supported '
                         'during a balance process.').format(pool.name)
                handle_exception(Exception(e_msg), request)
            # _resize_pool_start() add dev mode is quick so no async or tid
            self._resize_pool_start(pool, dnames)
            force = False
            # During dev add we also offer raid level change, if selected
            # blanket apply '-f' to allow for reducing metadata integrity.
            if new_raid != pool.raid:
                force = True
            # Django-ztask initialization as balance is long running.
            tid = self._balance_start(pool, force=force, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks.')
                handle_exception(Exception(e_msg), request)
            detached_disks_selected = 0
            for d in disks:  # to be removed
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk ({}) cannot be removed because it does '
                             'not belong to this '
                             'pool ({}).').format(d.name, pool.name)
                    handle_exception(Exception(e_msg), request)
                if re.match('detached-', d.name) is not None:
                    detached_disks_selected += 1
            if detached_disks_selected >= 3:
                # Artificial constraint but no current btrfs raid level yet
                # allows for > 2 dev detached and we have a mounted vol.
                # NOTE(review): message reads "two detached disks" with a
                # missing space between 'two' and 'detached' in the output.
                e_msg = ('We currently only support removing two'
                         'detached disks at a time.')
                handle_exception(Exception(e_msg), request)
            attached_disks_selected = (num_disks_selected -
                                       detached_disks_selected)
            remaining_attached_disks = (pool.disk_set.attached().count() -
                                        attached_disks_selected)
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid ({}) configuration.').format(pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_attached_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid1) '
                         'requires a minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_attached_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid10) '
                         'requires a minimum of 4 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_attached_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid5) requires a '
                         'minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_attached_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid6) requires a '
                         'minimum of 3 disks.')
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.allocated
            # NOTE(review): the comparison uses pool.size - usage as free
            # space, but the message reports 'usage' as "available free
            # space" — the newer variant reports pool.size - usage here.
            if size_cut >= (pool.size - usage):
                e_msg = ('Removing disks ({}) may shrink the pool by '
                         '{} KB, which is greater than available free '
                         'space {} KB. This is '
                         'not supported.').format(dnames, size_cut, usage)
                handle_exception(Exception(e_msg), request)
            # Unlike resize_pool_start() with add=True a remove has an
            # implicit balance where the removed disks contents are
            # re-distributed across the remaining pool members.
            # This internal balance cannot currently be monitored by the
            # usual 'btrfs balance status /mnt_pt' command. So we have to
            # use our own mechanism to assess it's status.
            # Django-ztask initialization:
            tid = self._resize_pool_start(pool, dnames, add=False)
            ps = PoolBalance(pool=pool, tid=tid, internal=True)
            ps.save()
            # Setting disk.pool = None for all removed members is redundant
            # as our next disk scan will re-find them until such time as
            # our async task, and it's associated dev remove, has completed
            # it's internal balance. This can take hours.
        else:
            e_msg = 'Command ({}) is not supported.'.format(command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pid, command):
    """
    resize a pool.
    @pname: pool's name
    @command: 'add' - add a list of disks and hence expand the pool
              'remove' - remove a list of disks and hence shrink the pool

    Intermediate variant: also handles 'remount' and 'quotas'; uses the
    synchronous resize_pool() plus an explicit balance task.
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        # NOTE(review): bare except masks errors other than
        # Pool.DoesNotExist.
        except:
            e_msg = 'Pool with id ({}) does not exist.'.format(pid)
            handle_exception(Exception(e_msg), request)
        # The OS (root) pool only allows the 'quotas' command.
        if (pool.role == 'root' and command != 'quotas'):
            e_msg = ('Edit operations are not allowed on this pool ({}) '
                     'as it contains the operating '
                     'system.').format(pool.name)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        if (command == 'quotas'):
            return self._quotas(request, pool)
        disks = [
            self._validate_disk(d, request)
            for d in request.data.get('disks', [])
        ]
        num_new_disks = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get('raid_level', pool.raid)
        # Prospective member count if the selected disks were added.
        num_total_disks = (Disk.objects.filter(pool=pool).count() +
                           num_new_disks)
        if (command == 'add'):
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                             'because it belongs to another pool ({})'
                             '.').format(d.name, pool.name, d.pool.name)
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui.').format(d.name)
                    handle_exception(Exception(e_msg), request)
            if pool.raid == 'single' and new_raid == 'raid10':
                # TODO: Consider removing once we have better space calc.
                # Avoid extreme raid level change upwards (space issues).
                e_msg = ('Pool migration from {} to {} is not '
                         'supported.').format(pool.raid, new_raid)
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_disks < 4):
                e_msg = ('A minimum of 4 drives are required for the '
                         'raid level: raid10.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_disks < 3):
                e_msg = ('A minimum of 3 drives are required for the '
                         'raid level: raid6.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_disks < 2):
                e_msg = ('A minimum of 2 drives are required for the '
                         'raid level: raid5.')
                handle_exception(Exception(e_msg), request)
            # Refuse a resize while any balance is active/paused.
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=
                    r'(started|running|cancelling|pausing|paused)').exists(
                    )):  # noqa E501
                e_msg = ('A Balance process is already running or paused '
                         'for this pool ({}). Resize is not supported '
                         'during a balance process.').format(pool.name)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames)
            # During dev add we also offer raid level change, if selected
            # blanket apply '-f' to allow for reducing metadata integrity.
            force = False
            if new_raid != pool.raid:
                force = True
            tid = self._balance_start(pool, force=force, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks.')
                handle_exception(Exception(e_msg), request)
            for d in disks:
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk ({}) cannot be removed because it does '
                             'not belong to this '
                             'pool ({}).').format(d.name, pool.name)
                    handle_exception(Exception(e_msg), request)
            # Member count remaining after the selected removals.
            remaining_disks = (Disk.objects.filter(pool=pool).count() -
                               num_new_disks)
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid ({}) configuration.').format(pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid1) '
                         'requires a minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid10) '
                         'requires a minimum of 4 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid5) requires a '
                         'minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid6) requires a '
                         'minimum of 3 disks.')
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.size
            # NOTE(review): the comparison uses pool.size - usage as free
            # space, but the message reports 'usage' as "available free
            # space" — the values disagree.
            if size_cut >= (pool.size - usage):
                e_msg = ('Removing disks ({}) may shrink the pool by '
                         '{} KB, which is greater than available free '
                         'space {} KB. This is '
                         'not supported.').format(dnames, size_cut, usage)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames, add=False)
            tid = self._balance_start(pool)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            for d in disks:
                d.pool = None
                d.save()
        else:
            e_msg = 'Command ({}) is not supported.'.format(command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def put(self, request, pid, command):
    """
    Resize a pool or change one of its settings.

    @pid: pool's database id.
    @command: 'add'     - add a list of disks and hence expand the pool
              'remove'  - remove a list of disks and hence shrink the pool
              'remount' - remount the pool, to apply changed mount options
              'quotas'  - request pool quota setting change
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(id=pid)
        # Narrowed from a bare except: only a missing pool should produce
        # this message; anything else propagates to _handle_exception.
        except Pool.DoesNotExist:
            e_msg = 'Pool with id ({}) does not exist.'.format(pid)
            handle_exception(Exception(e_msg), request)
        if (pool.role == 'root' and command != 'quotas'):
            e_msg = ('Edit operations are not allowed on this pool ({}) '
                     'as it contains the operating '
                     'system.').format(pool.name)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        if (command == 'quotas'):
            # There is a pending btrfs change that allows for quota state
            # change on unmounted Volumes (pools).
            return self._quotas(request, pool)
        if not pool.is_mounted:
            e_msg = ('Pool member / raid edits require an active mount. '
                     'Please see the "Maintenance required" section.')
            handle_exception(Exception(e_msg), request)
        # A remove request of exactly ['missing'] is a special token meaning
        # "remove whatever devices btrfs reports as missing": no db disk
        # objects correspond to it, so skip per-disk validation entirely.
        if command == 'remove' and \
                request.data.get('disks', []) == ['missing']:
            disks = []
            logger.debug('Remove missing request skipping disk validation')
        else:
            disks = [self._validate_disk_id(diskId, request) for diskId in
                     request.data.get('disks', [])]
        num_disks_selected = len(disks)
        dnames = self._role_filter_disk_names(disks, request)
        new_raid = request.data.get('raid_level', pool.raid)
        if (command == 'add'):
            # Only attached disks can be selected during an add operation.
            num_total_attached_disks = pool.disk_set.attached().count() \
                + num_disks_selected
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                             'because it belongs to another pool ({})'
                             '.').format(d.name, pool.name, d.pool.name)
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui.').format(d.name)
                    handle_exception(Exception(e_msg), request)
            if pool.raid == 'single' and new_raid == 'raid10':
                # TODO: Consider removing once we have better space calc.
                # Avoid extreme raid level change upwards (space issues).
                e_msg = ('Pool migration from {} to {} is not '
                         'supported.').format(pool.raid, new_raid)
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid10' and num_total_attached_disks < 4):
                e_msg = ('A minimum of 4 drives are required for the '
                         'raid level: raid10.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_attached_disks < 3):
                e_msg = ('A minimum of 3 drives are required for the '
                         'raid level: raid6.')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_attached_disks < 2):
                e_msg = ('A minimum of 2 drives are required for the '
                         'raid level: raid5.')
                handle_exception(Exception(e_msg), request)
            # Refuse a resize while any balance is in flight for this pool.
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=r'(started|running|cancelling|pausing|paused)').exists()):  # noqa E501
                e_msg = ('A Balance process is already running or paused '
                         'for this pool ({}). Resize is not supported '
                         'during a balance process.').format(pool.name)
                handle_exception(Exception(e_msg), request)
            # TODO: run resize_pool() as async task like start_balance()
            resize_pool(pool, dnames)  # None if no action
            force = False
            # During dev add we also offer raid level change, if selected
            # blanket apply '-f' to allow for reducing metadata integrity.
            if new_raid != pool.raid:
                force = True
            tid = self._balance_start(pool, force=force, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks.')
                handle_exception(Exception(e_msg), request)
            detached_disks_selected = 0
            for d in disks:  # to be removed
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk ({}) cannot be removed because it does '
                             'not belong to this '
                             'pool ({}).').format(d.name, pool.name)
                    handle_exception(Exception(e_msg), request)
                if re.match('detached-', d.name) is not None:
                    detached_disks_selected += 1
            if detached_disks_selected >= 3:
                # Artificial constraint but no current btrfs raid level yet
                # allows for > 2 dev detached and we have a mounted vol.
                # Fixed missing space between concatenated fragments
                # (previously rendered as "twodetached").
                e_msg = ('We currently only support removing two '
                         'detached disks at a time.')
                handle_exception(Exception(e_msg), request)
            attached_disks_selected = (
                num_disks_selected - detached_disks_selected)
            remaining_attached_disks = (
                pool.disk_set.attached().count() - attached_disks_selected)
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid ({}) configuration.').format(pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_attached_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid1) '
                         'requires a minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_attached_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration (raid10) '
                         'requires a minimum of 4 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_attached_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid5) requires a '
                         'minimum of 2 disks.')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_attached_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration (raid6) requires a '
                         'minimum of 3 disks.')
                handle_exception(Exception(e_msg), request)
            # Refuse the shrink when the devices to be removed hold at
            # least as much data as the pool's current free space.
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.size
            if size_cut >= (pool.size - usage):
                e_msg = ('Removing disks ({}) may shrink the pool by '
                         '{} KB, which is greater than available free '
                         'space {} KB. This is '
                         'not supported.').format(dnames, size_cut, usage)
                handle_exception(Exception(e_msg), request)
            # TODO: run resize_pool() as async task like start_balance(),
            # particularly important on device delete as it initiates an
            # internal volume balance which cannot be monitored by:
            # btrfs balance status.
            # See https://github.com/rockstor/rockstor-core/issues/1722
            # Hence we need also to add a 'DIY' status / percentage
            # reporting method.
            resize_pool(pool, dnames, add=False)  # None if no action
            # Unlike resize_pool() with add=True a delete has an implicit
            # balance where the deleted disks contents are re-distributed
            # across the remaining disks.
            for d in disks:
                d.pool = None
                d.save()
        else:
            e_msg = 'Command ({}) is not supported.'.format(command)
            handle_exception(Exception(e_msg), request)
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)
def post(self, request):
    """
    Create a new btrfs pool.

    Input is a list of disks, raid_level and name of the pool; responds
    with the serialized new pool on success, otherwise raises via
    handle_exception() with a user-facing message.
    """
    with self._handle_exception(request):
        # Default to [] so a missing 'disks' key yields a clean validation
        # error below rather than a TypeError from iterating None.
        disks = [self._validate_disk(d, request) for d in
                 request.data.get('disks', [])]
        if not disks:
            # Without this guard an empty selection with raid level
            # 'single' passes every count check and later crashes on
            # dnames[0] (IndexError).
            e_msg = 'At least one disk is required to create a pool.'
            handle_exception(Exception(e_msg), request)
        pname = request.data['pname']
        if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
            e_msg = ('Invalid characters in pool name. Following '
                     'characters are allowed: letter(a-z or A-Z), '
                     'digit(0-9), '
                     'hyphen(-), underscore(_) or a period(.).')
            handle_exception(Exception(e_msg), request)
        if (len(pname) > 255):
            e_msg = 'Pool name must be less than 255 characters.'
            handle_exception(Exception(e_msg), request)
        if (Pool.objects.filter(name=pname).exists()):
            e_msg = ('Pool ({}) already exists. '
                     'Choose a different name.').format(pname)
            handle_exception(Exception(e_msg), request)
        # Pool and share names share a mount namespace so must be distinct.
        if (Share.objects.filter(name=pname).exists()):
            e_msg = ('A share with this name ({}) exists. Pool and share '
                     'names must be distinct. '
                     'Choose a different name.').format(pname)
            handle_exception(Exception(e_msg), request)
        for d in disks:
            if (d.btrfs_uuid is not None):
                e_msg = ('Another BTRFS filesystem exists on this '
                         'disk ({}). '
                         'Erase the disk and try again.').format(d.name)
                handle_exception(Exception(e_msg), request)
        raid_level = request.data['raid_level']
        if (raid_level not in self.RAID_LEVELS):
            e_msg = ('Unsupported raid level. Use one of: '
                     '{}.').format(self.RAID_LEVELS)
            handle_exception(Exception(e_msg), request)
        # consolidated raid0 & raid 1 disk check
        if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
            e_msg = ('At least 2 disks are required for the raid level: '
                     '{}.').format(raid_level)
            handle_exception(Exception(e_msg), request)
        if (raid_level == self.RAID_LEVELS[3]):
            if (len(disks) < 4):
                e_msg = ('A minimum of 4 drives are required for the '
                         'raid level: {}.').format(raid_level)
                handle_exception(Exception(e_msg), request)
        if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
            e_msg = ('2 or more disks are required for the raid '
                     'level: {}.').format(raid_level)
            handle_exception(Exception(e_msg), request)
        if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
            e_msg = ('3 or more disks are required for the raid '
                     'level: {}.').format(raid_level)
            handle_exception(Exception(e_msg), request)
        compression = self._validate_compression(request)
        mnt_options = self._validate_mnt_options(request)
        dnames = self._role_filter_disk_names(disks, request)
        p = Pool(name=pname, raid=raid_level, compression=compression,
                 mnt_options=mnt_options)
        p.save()
        p.disk_set.add(*disks)
        # added for loop to save disks appears p.disk_set.add(*disks) was
        # not saving disks in test environment
        for d in disks:
            d.pool = p
            d.save()
        add_pool(p, dnames)
        p.size = p.usage_bound()
        p.uuid = btrfs_uuid(dnames[0])
        p.save()
        # Now we ensure udev info is updated via system wide trigger
        # as per pool resize add, only here it is for a new pool.
        trigger_udev_update()
        return Response(PoolInfoSerializer(p).data)
def put(self, request, pname, command):
    """
    Resize a pool (legacy name-addressed variant).

    @pname: pool's name
    @command: 'add' - add a list of disks and hence expand the pool
              'remove' - remove a list of disks and hence shrink the pool
    """
    with self._handle_exception(request):
        try:
            pool = Pool.objects.get(name=pname)
        except:
            # NOTE(review): bare except masks any lookup failure, not just
            # a missing pool — consider Pool.DoesNotExist here.
            e_msg = ('Pool(%s) does not exist.' % pname)
            handle_exception(Exception(e_msg), request)
        # The root pool hosts the OS; disallow all edits to it.
        if (pool.role == 'root'):
            e_msg = ('Edit operations are not allowed on this Pool(%s) '
                     'as it contains the operating system.' % pname)
            handle_exception(Exception(e_msg), request)
        if (command == 'remount'):
            return self._remount(request, pool)
        disks = [self._validate_disk(d, request) for d in
                 request.data.get('disks', [])]
        num_new_disks = len(disks)
        dnames = [d.name for d in disks]
        # If no raid_level is supplied, keep the pool's current level.
        new_raid = request.data.get('raid_level', pool.raid)
        # Disk count the pool would have after an 'add' completes.
        num_total_disks = (Disk.objects.filter(pool=pool).count() +
                           num_new_disks)
        if (command == 'add'):
            for d in disks:
                if (d.pool is not None):
                    e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                             'because it belongs to another pool(%s)' %
                             (d.name, pool.name, d.pool.name))
                    handle_exception(Exception(e_msg), request)
                if (d.btrfs_uuid is not None):
                    e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                             'past. If you really like to add it, wipe it '
                             'from the Storage -> Disks screen of the '
                             'web-ui' % d.name)
                    handle_exception(Exception(e_msg), request)
            # Migrating a multi-device raid level down to 'single' is not
            # supported by this endpoint.
            if (pool.raid != 'single' and new_raid == 'single'):
                e_msg = ('Pool migration from %s to %s is not supported.'
                         % (pool.raid, new_raid))
                handle_exception(Exception(e_msg), request)
            # Post-add minimum device counts per target raid level.
            if (new_raid == 'raid10' and num_total_disks < 4):
                e_msg = ('A minimum of Four drives are required for the '
                         'raid level: raid10')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid6' and num_total_disks < 3):
                e_msg = ('A minimum of Three drives are required for the '
                         'raid level: raid6')
                handle_exception(Exception(e_msg), request)
            if (new_raid == 'raid5' and num_total_disks < 2):
                e_msg = ('A minimum of Two drives are required for the '
                         'raid level: raid5')
                handle_exception(Exception(e_msg), request)
            # Refuse a resize while any balance is in flight for this pool.
            if (PoolBalance.objects.filter(
                    pool=pool,
                    status__regex=r'(started|running|cancelling|pausing|paused)').exists()):
                e_msg = ('A Balance process is already running or paused '
                         'for this pool(%s). Resize is not supported '
                         'during a balance process.' % pool.name)
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames)
            # Start a balance (with optional raid conversion) and record it
            # so its progress can be tracked.
            tid = self._balance_start(pool, convert=new_raid)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            pool.raid = new_raid
            for d_o in disks:
                d_o.pool = pool
                d_o.save()
            # Now we ensure udev info is updated via system wide trigger
            trigger_udev_update()
        elif (command == 'remove'):
            # Raid conversion and device removal are mutually exclusive.
            if (new_raid != pool.raid):
                e_msg = ('Raid configuration cannot be changed while '
                         'removing disks')
                handle_exception(Exception(e_msg), request)
            for d in disks:
                if (d.pool is None or d.pool != pool):
                    e_msg = ('Disk(%s) cannot be removed because it does '
                             'not belong to this Pool(%s)' %
                             (d.name, pool.name))
                    handle_exception(Exception(e_msg), request)
            # Disk count the pool would have after the removal completes.
            remaining_disks = (Disk.objects.filter(pool=pool).count() -
                               num_new_disks)
            # Post-remove minimum device counts per current raid level.
            if (pool.raid == 'raid0'):
                e_msg = ('Disks cannot be removed from a pool with this '
                         'raid(%s) configuration' % pool.raid)
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid1' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid1) '
                         'requires a minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid10' and remaining_disks < 4):
                e_msg = ('Disks cannot be removed from this pool '
                         'because its raid configuration(raid10) '
                         'requires a minimum of 4 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid5' and remaining_disks < 2):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid5) requires a '
                         'minimum of 2 disks')
                handle_exception(Exception(e_msg), request)
            if (pool.raid == 'raid6' and remaining_disks < 3):
                e_msg = ('Disks cannot be removed from this pool because '
                         'its raid configuration(raid6) requires a '
                         'minimum of 3 disks')
                handle_exception(Exception(e_msg), request)
            # Refuse the shrink when the devices to be removed hold at
            # least as much data as the pool's current free space.
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            size_cut = 0
            for d in disks:
                size_cut += d.size
            if size_cut >= (pool.size - usage):
                e_msg = ('Removing these(%s) disks may shrink the pool by '
                         '%dKB, which is greater than available free space'
                         ' %dKB. This is not supported.' %
                         (dnames, size_cut, usage))
                handle_exception(Exception(e_msg), request)
            resize_pool(pool, dnames, add=False)
            # Track the balance triggered by the device removal.
            tid = self._balance_start(pool)
            ps = PoolBalance(pool=pool, tid=tid)
            ps.save()
            for d in disks:
                d.pool = None
                d.save()
        else:
            e_msg = ('command(%s) is not supported.' % command)
            handle_exception(Exception(e_msg), request)
        # Recompute the pool size from its members before responding.
        pool.size = pool.usage_bound()
        pool.save()
        return Response(PoolInfoSerializer(pool).data)