Example #1
    def pools_usage(self, last_ts):
        """
        This info is not from proc atm, but will eventually be.
        """
        #collect usage only if the data is more than 30 seconds old
        now = time.mktime(time.gmtime())
        if (now - last_ts < 30):
            return last_ts
        for p in Pool.objects.all():
            arb_disk = Disk.objects.filter(pool=p)[0].name
            try:
                usage = pool_usage(arb_disk)
                pu = PoolUsage(pool=p.name, usage=usage[1])
                self.q.put(pu)

                #get usage of all shares in this pool
                pool_device = Disk.objects.filter(pool=p)[0].name
                share_map = {}
                for share in Share.objects.filter(pool=p):
                    share_map[share.qgroup] = share.name
                usaged = shares_usage(p.name, pool_device, share_map)
                for s in usaged.keys():
                    su = ShareUsage(name=s, usage=usaged[s])
                    self.q.put(su)
            except Exception:
                logger.debug('command exception while getting pool usage '
                             'for: %s' % (p.name))
                logger.exception('exception')
        return now
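The last_ts handshake above throttles collection: the caller passes in the timestamp of the previous run and stores whatever the method returns, so usage is gathered at most once every 30 seconds. A minimal sketch of such a polling loop (hypothetical driver code, not from the source):

    # Hypothetical caller; 'collector' stands in for whatever object owns
    # pools_usage(). Feeding the return value back in arms the 30s throttle.
    last_pu_ts = 0
    while True:
        last_pu_ts = collector.pools_usage(last_pu_ts)
        time.sleep(1)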
Example #2
    def pools_usage(self, last_ts):
        """
        This info is not from proc atm, but will eventually be.
        """
        #collect usage only if the data is more than 30 seconds old
        now = time.mktime(time.gmtime())
        if (now - last_ts < 30):
            return last_ts
        for p in Pool.objects.all():
            arb_disk = Disk.objects.filter(pool=p)[0].name
            try:
                usage = pool_usage(arb_disk)
                pu = PoolUsage(pool=p.name, usage=usage[1])
                self.q.put(pu)

                #get usage of all shares in this pool
                pool_device = Disk.objects.filter(pool=p)[0].name
                share_map = {}
                for share in Share.objects.filter(pool=p):
                    share_map[share.qgroup] = share.name
                usaged = shares_usage(p.name, pool_device, share_map)
                for s in usaged.keys():
                    su = ShareUsage(name=s, usage=usaged[s])
                    self.q.put(su)
            except Exception:
                logger.debug('command exception while getting pool usage '
                             'for: %s' % (p.name))
                logger.exception('exception')
        return now
Example #3
 def free(self, *args, **kwargs):
     # Why do we compute pool usage on the fly like this and not like
     # share usage as part of state refresh? This is a lot simpler and
     # less code. For share usage, this type of logic could slow things
     # down quite a bit because there can be 100's of Shares, but number
     # of Pools even on a large instance is usually no more than a few.
     return self.size - pool_usage('%s%s' % (settings.MNT_PT, self.name))
Example #4
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.parted = False
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #5
 def free(self, *args, **kwargs):
     # Why do we compute pool usage on the fly like this and not like
     # share usage as part of state refresh? This is a lot simpler and
     # less code. For share usage, this type of logic could slow things
     # down quite a bit because there can be 100's of Shares, but number
     # of Pools even on a large instance is usually no more than a few.
     return self.size - pool_usage('%s%s' % (settings.MNT_PT, self.name))
Example #6
 def pools_usage(self, last_ts):
     """
     This info is not from proc atm, but will eventually be.
     """
     #collect usage only if the data is more than 30 seconds old
     now = time.mktime(time.gmtime())
     if (now - last_ts < 30):
         return last_ts
     ts = datetime.utcnow().replace(tzinfo=utc)
     for p in Pool.objects.all():
         arb_disk = Disk.objects.filter(pool=p)[0].name
         try:
             usage = pool_usage(arb_disk)
             pu = None
             try:
                 pu = PoolUsage.objects.filter(pool=p.name).latest('id')
                 if ((ts - pu.ts).total_seconds() > 90):
                     pu = None
             except Exception, e:
                 e_msg = ('Unable to get latest pool usage object for '
                          'pool(%s). A new one will be created.' % p.name)
                 logger.error(e_msg)
             if (pu is None or pu.usage != usage[1]):
                 pu = PoolUsage(pool=p.name, usage=usage[1], ts=ts)
             else:
                 pu.ts = ts
                 pu.count = pu.count + 1
             self._save_wrapper(pu)
         except Exception, e:
             logger.debug('command exception while getting pool usage '
                          'for: %s' % (p.name))
              logger.exception(e)
      return now
Example #7
 def pools_usage(self, last_ts):
     """
     This info is not from proc atm, but will eventually be.
     """
     #  collect usage only if the data is more than 30 seconds old
     now = time.mktime(time.gmtime())
     if (now - last_ts < 30):
         return last_ts
     ts = datetime.utcnow().replace(tzinfo=utc)
     for p in Pool.objects.all():
         arb_disk = Disk.objects.filter(pool=p)[0].name
         try:
             usage = pool_usage(arb_disk)
             pu = None
             try:
                 pu = PoolUsage.objects.filter(pool=p.name).latest('id')
                 if ((ts - pu.ts).total_seconds() > 90):
                     pu = None
             except Exception, e:
                 e_msg = ('Unable to get latest pool usage object for '
                          'pool(%s). A new one will be created.' % p.name)
                 logger.error(e_msg)
             if (pu is None or pu.usage != usage[1]):
                 pu = PoolUsage(pool=p.name, usage=usage[1], ts=ts)
             else:
                 pu.ts = ts
                 pu.count = pu.count + 1
             self._save_wrapper(pu)
         except Exception, e:
             logger.debug('command exception while getting pool usage '
                          'for: %s' % (p.name))
              logger.exception(e)
      return now
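Examples #6 and #7 replace the unconditional insert of Examples #1/#2 with a coalescing write: if the latest PoolUsage row is younger than 90 seconds and carries the same usage value, its timestamp is advanced and its count bumped instead of a new row being created. A self-contained sketch of that idea, with hypothetical names standing in for the Django model:

    # Hypothetical stand-ins for the PoolUsage model and its save path.
    class Row(object):
        def __init__(self, usage, ts):
            self.usage, self.ts, self.count = usage, ts, 1

    def record(latest, usage, ts, window=90):
        stale = latest is None or (ts - latest.ts).total_seconds() > window
        if stale or latest.usage != usage:
            return Row(usage, ts)  # changed reading (or no recent row): new row
        latest.ts = ts             # unchanged reading: extend the existing row
        latest.count += 1
        return latest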
Example #8
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.parted = False
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #9
 def _refresh_pool_state(self, pool):
     dname = Disk.objects.filter(pool=pool)[0].name
     mount_root(pool, dname)
     pool_info = get_pool_info(dname)
     pool.name = pool_info['label']
     pool.raid = pool_raid('%s%s' % (settings.MNT_PT, pool.name))['data']
     pool.size = pool_usage('%s%s' % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #10
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        try:
            disks = request.DATA['disks'].split(',')
            pname = request.DATA['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with a letter(a-z) and can'
                         ' be followed by any of the following characters: '
                         'letter(a-z), digits(0-9), hyphen(-), underscore'
                         '(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (not Disk.objects.filter(name=d).exists()):
                    e_msg = ('Unknown disk: %s' % d)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            if (raid_level in self.RAID_LEVELS[0:2] and len(disks) == 1):
                e_msg = ('More than one disk is required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            p = Pool(name=pname, raid=raid_level)
            add_pool(pname, raid_level, raid_level, disks)
            usage = pool_usage(disks[0])
            p.size = usage[0]
            p.save()
            p.disk_set.add(*[Disk.objects.get(name=d) for d in disks])
            return Response(PoolInfoSerializer(p).data)
        except RockStorAPIException:
            raise
        except Exception, e:
            handle_exception(e, request)
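The index-based disk-count checks above presume a specific ordering of self.RAID_LEVELS that the snippet does not show. One ordering consistent with those checks, offered as an inference rather than a fact from the source:

    # Hypothetical: [0:2] need more than one disk, [2] needs four or more,
    # [4] needs two or more, [5] needs three or more; [3] is unconstrained.
    RAID_LEVELS = ('raid0', 'raid1', 'raid10', 'single', 'raid5', 'raid6')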
Example #11
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        try:
            if (not Pool.objects.filter(name=pname).exists()):
                msg = ('pool: %s does not exist' % pname)
                raise Exception(msg)

            disks = request.DATA['disks'].split(',')
            if (len(disks) == 0):
                msg = ('list of disks in the input is empty')
                raise Exception(msg)

            pool = Pool.objects.get(name=pname)
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            if (command == 'add'):
                for d in disks:
                    d_o = Disk.objects.get(name=d)
                    if (d_o.pool is not None):
                        msg = ('disk %s already part of pool %s' %
                               (d, d_o.pool.name))
                        raise Exception(msg)
                    d_o.pool = pool
                    d_o.save()
                resize_pool(pool.name, mount_disk, disks)
            elif (command == 'remove'):
                if (len(Disk.objects.filter(pool=pool)) == 1):
                    msg = ('pool %s has only one disk. use delete command '
                           'instead' % pname)
                    raise Exception(msg)
                for d in disks:
                    d_o = Disk.objects.get(name=d)
                    if (d_o.pool != pool):
                        msg = ('disk %s not part of pool %s' %
                               (d, d_o.pool.name))
                        raise Exception(msg)
                    d_o.pool = None
                    d_o.save()
                mount_disk = Disk.objects.filter(pool=pool)[0].name
                resize_pool(pool.name, mount_disk, disks, add=False)
            else:
                msg = ('unknown command: %s' % command)
                raise Exception(msg)
            usage = pool_usage(mount_disk)
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)

        except Exception, e:
            handle_exception(e, request)
Example #12
 def _refresh_pool_state(self, pool):
     fd = pool.disk_set.first()
     if fd is None:
         return pool.delete()
     mount_root(pool, fd.name)
     pool_info = get_pool_info(fd.name)
     pool.name = pool_info["label"]
     pool.raid = pool_raid("%s%s" % (settings.MNT_PT, pool.name))["data"]
     pool.size = pool_usage("%s%s" % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #13
 def _create_root_pool(self, d):
     p = Pool(name=settings.ROOT_POOL, raid='single')
     p.disk_set.add(d)
     p.save()
     d.pool = p
     d.save()
     p.size = pool_usage(mount_root(p))[0]
     enable_quota(p)
     p.uuid = btrfs_uuid(d.name)
     p.save()
     return p
Example #14
 def _refresh_pool_state(self, pool):
     fd = pool.disk_set.first()
     if (fd is None):
         return pool.delete()
     mount_root(pool, fd.name)
     pool_info = get_pool_info(fd.name)
     pool.name = pool_info['label']
     pool.raid = pool_raid('%s%s' % (settings.MNT_PT, pool.name))['data']
     pool.size = pool_usage('%s%s' % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #15
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        try:
            if (not Pool.objects.filter(name=pname).exists()):
                msg = ('pool: %s does not exist' % pname)
                raise Exception(msg)

            disks = request.DATA['disks'].split(',')
            if (len(disks) == 0):
                msg = ('list of disks in the input is empty')
                raise Exception(msg)

            pool = Pool.objects.get(name=pname)
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            if (command == 'add'):
                for d in disks:
                    d_o = Disk.objects.get(name=d)
                    if (d_o.pool is not None):
                        msg = ('disk %s already part of pool %s' %
                            (d, d_o.pool.name))
                        raise Exception(msg)
                    d_o.pool = pool
                    d_o.save()
                resize_pool(pool.name, mount_disk, disks)
            elif (command == 'remove'):
                if (len(Disk.objects.filter(pool=pool)) == 1):
                    msg = ('pool %s has only one disk. use delete command '
                           'instead' % pname)
                    raise Exception(msg)
                for d in disks:
                    d_o = Disk.objects.get(name=d)
                    if (d_o.pool != pool):
                        msg = ('disk %s not part of pool %s' % (d, d_o.pool.name))
                        raise Exception(msg)
                    d_o.pool = None
                    d_o.save()
                mount_disk = Disk.objects.filter(pool=pool)[0].name
                resize_pool(pool.name, mount_disk, disks, add=False)
            else:
                msg = ('unknown command: %s' % command)
                raise Exception(msg)
            usage = pool_usage(mount_disk)
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)

        except Exception, e:
            handle_exception(e, request)
Example #16
def btrfs_add_pool(pool):
	disks = btrfs_disk_scan()
	disks_pool = []
	for disk in pool["disks"]:
		for disk_d in disks:
			if disk == disk_d["name"]:
				disks_pool.append(disk_d)
	dnames = [d["name"] for d in disks_pool]
	pool["disks"] = disks_pool
	add_pool(pool,dnames)
	pool["size"] = pool_usage(mount_root(pool))[0]
	pool["uuid"] = btrfs_uuid(dnames[0])
	return pool
Example #17
def btrfs_add_pool(pool):
    disks = btrfs_disk_scan()
    disks_pool = []
    for disk in pool["disks"]:
        for disk_d in disks:
            if disk == disk_d["name"]:
                disks_pool.append(disk_d)
    dnames = [d["name"] for d in disks_pool]
    pool["disks"] = disks_pool
    add_pool(pool, dnames)
    pool["size"] = pool_usage(mount_root(pool))[0]
    pool["uuid"] = btrfs_uuid(dnames[0])
    return pool
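btrfs_add_pool mutates the dict it is given: the "disks" list of device names is swapped for full disk records, and "size" and "uuid" keys are filled in. A hedged usage sketch; only the "disks" key is evidenced above, so any other keys consumed inside add_pool()/mount_root() are assumptions:

    # Hypothetical call; the "name"/"raid" keys are assumed, not shown above.
    pool = {"name": "mypool", "raid": "single", "disks": ["sdb", "sdc"]}
    pool = btrfs_add_pool(pool)
    print(pool["size"], pool["uuid"])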
Example #18
 def _update_disk_state():
     disks = scan_disks(settings.MIN_DISK_SIZE)
     for d in disks:
         dob = None
         if (Disk.objects.filter(name=d.name).exists()):
             dob = Disk.objects.get(name=d.name)
             dob.serial = d.serial
         elif (Disk.objects.filter(serial=d.serial).exists()):
             dob = Disk.objects.get(serial=d.serial)
             dob.name = d.name
         else:
             dob = Disk(name=d.name, size=d.size, parted=d.parted,
                        btrfs_uuid=d.btrfs_uuid, model=d.model,
                        serial=d.serial, transport=d.transport,
                        vendor=d.vendor)
         dob.size = d.size
         dob.parted = d.parted
         dob.offline = False
         dob.model = d.model
         dob.transport = d.transport
         dob.vendor = d.vendor
         dob.btrfs_uuid = d.btrfs_uuid
         if (d.fstype is not None and d.fstype != 'btrfs'):
             dob.btrfs_uuid = None
             dob.parted = True
         if (Pool.objects.filter(name=d.label).exists()):
             dob.pool = Pool.objects.get(name=d.label)
         else:
             dob.pool = None
         if (dob.pool is None and d.root is True):
             p = Pool(name=settings.ROOT_POOL, raid='single')
             p.disk_set.add(dob)
             p.save()
             dob.pool = p
             dob.save()
             p.size = pool_usage(mount_root(p))[0]
             enable_quota(p)
             p.uuid = btrfs_uuid(dob.name)
             p.save()
         dob.save()
     for do in Disk.objects.all():
         if (do.name not in [d.name for d in disks]):
             do.offline = True
         else:
             try:
                 # for non ata/sata drives
                 do.smart_available, do.smart_enabled = smart.available(do.name)
             except Exception, e:
                 logger.exception(e)
                 do.smart_available = do.smart_enabled = False
         do.save()
Example #19
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             fd = p.disk_set.first()
             pool_info = get_pool_info(fd.name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
             p.save()
         except Exception, e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
Example #20
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             fd = p.disk_set.first()
             pool_info = get_pool_info(fd.name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
             p.save()
         except Exception, e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #21
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         #get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.save()
             mount_root(po, d)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #22
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        try:
            disks = request.DATA['disks'].split(',')
            pname = request.DATA['pname']

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (not Disk.objects.filter(name=d).exists()):
                    e_msg = ('Unknown disk: %s' % d)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            if (raid_level in self.RAID_LEVELS[0:2] and len(disks) == 1):
                e_msg = ('More than one disk is required for the chosen raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the chosen raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            p = Pool(name=pname, raid=raid_level)
            add_pool(pname, raid_level, raid_level, disks)
            usage = pool_usage(disks[0])
            p.size = usage[0]
            p.save()
            p.disk_set.add(*[Disk.objects.get(name=d) for d in disks])
            return Response(PoolInfoSerializer(p).data)
        except RockStorAPIException:
            raise
        except Exception, e:
            handle_exception(e, request)
Example #23
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        try:
            disks = request.DATA['disks'].split(',')
            pname = request.DATA['pname']

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (not Disk.objects.filter(name=d).exists()):
                    e_msg = ('Unknown disk: %s' % d)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            if (raid_level in self.RAID_LEVELS[0:2] and len(disks) == 1):
                e_msg = ('More than one disk is required for the chosen raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the chosen raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            p = Pool(name=pname, raid=raid_level)
            add_pool(pname, raid_level, raid_level, disks)
            usage = pool_usage(disks[0])
            p.size = usage[0]
            p.save()
            p.disk_set.add(*[Disk.objects.get(name=d) for d in disks])
            return Response(PoolInfoSerializer(p).data)
        except RockStorAPIException:
            raise
        except Exception, e:
            handle_exception(e, request)
Example #24
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         #get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.save()
             mount_root(po, d)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #25
 def _update_disk_state():
     """
     A db atomic method to update the database of attached disks / drives.
     Works only on device serial numbers for drive identification.
     Calls scan_disks to establish the current connected drives info.
     Initially removes duplicate by serial number db entries to deal
     with legacy db states and obfuscates all previous device names as they
     are transient. The drive database is then updated with the attached
     disks info and previously known drives no longer found attached are
     marked as offline. All offline drives have their SMART availability and
     activation status removed and all attached drives have their SMART
     availability assessed and activated if available.
     :return: serialized models of attached and missing disks via serial num
     """
     # Acquire a list (namedtuple collection) of attached drives > min size
     disks = scan_disks(settings.MIN_DISK_SIZE)
     serial_numbers_seen = []
     # Sanitize our db entries in view of what we know we have attached.
     # A device's serial number is the only known externally unique entry;
     # scan_disks makes this so in the case of empty or repeat entries by
     # providing fake serial numbers, which are in turn flagged via the
     # WebUI as unreliable.
     # 1) scrub all device names with unique but nonsense uuid4
     # 2) mark all offline disks as such via db flag
     # 3) mark all offline disks' smart available and enabled flags as False
     logger.info('update_disk_state() Called')
     for do in Disk.objects.all():
         # Replace all device names with a unique placeholder on each scan
         # N.B. do not optimize by re-using uuid index as this could lead
         # to a non refreshed webui acting upon an entry that is different
         # from that shown to the user.
         do.name = str(uuid.uuid4()).replace('-', '')  # 32 chars long
         # Delete duplicate or fake by serial number db disk entries.
         # It makes no sense to save fake serial number drives between scans
         # as on each scan the serial number is re-generated anyway.
         if (do.serial in serial_numbers_seen) or (len(do.serial) == 48):
             logger.info('Deleting duplicate or fake (by serial) Disk db '
                         'entry. Serial = %s' % do.serial)
             do.delete()  # django >=1.9 returns a dict of deleted items.
             # Continue onto next db disk object as nothing more to process.
             continue
         # first encounter of this serial in the db so stash it for reference
         serial_numbers_seen.append(deepcopy(do.serial))
         # Look for devices (by serial number) that are in the db but not in
         # our disk scan, ie offline / missing.
         if (do.serial not in [d.serial for d in disks]):
             # update the db entry as offline
             do.offline = True
             # disable S.M.A.R.T available and enabled flags.
             do.smart_available = do.smart_enabled = False
         do.save()  # make sure all updates are flushed to db
     # Our db now has no device name info as all dev names are place holders.
     # Iterate over attached drives to update the db's knowledge of them.
     # Kernel dev names are unique so safe to overwrite our db unique name.
     for d in disks:
         # start with an empty disk object
         dob = None
         # If the db has an entry with this disk's serial number then
         # use this db entry and update the device name from our recent scan.
         if (Disk.objects.filter(serial=d.serial).exists()):
             dob = Disk.objects.get(serial=d.serial)
             dob.name = d.name
         else:
             # We have an assumed new disk entry as no serial match in db.
             # Build a new entry for this disk.
             dob = Disk(name=d.name, serial=d.serial)
         # Update the db disk object (existing or new) with our scanned info
         dob.size = d.size
         dob.parted = d.parted
         dob.offline = False  # as we are iterating over attached devices
         dob.model = d.model
         dob.transport = d.transport
         dob.vendor = d.vendor
         dob.btrfs_uuid = d.btrfs_uuid
         # If attached disk has an fs and it isn't btrfs
         if (d.fstype is not None and d.fstype != 'btrfs'):
             dob.btrfs_uuid = None
             dob.parted = True
         # If our existing Pool db knows of this disk's pool via its label:
         if (Pool.objects.filter(name=d.label).exists()):
             # update the disk db object's pool field accordingly.
             dob.pool = Pool.objects.get(name=d.label)
         else:  # this disk is not known to exist in any pool via its label
             dob.pool = None
         # If no pool has yet been found with this disk's label in and
         # the attached disk is our root disk (flagged by scan_disks)
         if (dob.pool is None and d.root is True):
             # setup our special root disk db entry in Pool
             p = Pool(name=settings.ROOT_POOL, raid='single')
             p.disk_set.add(dob)
             p.save()
             # update disk db object to reflect special root pool status
             dob.pool = p
             dob.save()
             p.size = pool_usage(mount_root(p))[0]
             enable_quota(p)
             p.uuid = btrfs_uuid(dob.name)
             p.save()
         # save our updated db disk object
         dob.save()
     # Update online db entries with S.M.A.R.T availability and status.
     for do in Disk.objects.all():
         # find all the not offline db entries
         if (not do.offline):
             # We have an attached disk db entry
             if re.match('vd', do.name):
                 # Virtio disks (named vd*) have no smart capability.
                 # avoids cluttering logs with exceptions on these devices.
                 do.smart_available = do.smart_enabled = False
                 continue
             # try to establish smart availability and status and update db
             try:
                 # for non ata/sata drives
                 do.smart_available, do.smart_enabled = smart.available(do.name)
             except Exception, e:
                 logger.exception(e)
                 do.smart_available = do.smart_enabled = False
         do.save()
Example #26
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            if (pname == settings.ROOT_POOL):
                e_msg = ('Edit operations are not allowed on this Pool(%s) '
                         'as it contains the operating system.' % pname)
                handle_exception(Exception(e_msg), request)
            try:
                pool = Pool.objects.get(name=pname)
            except Exception:
                e_msg = ('Pool(%s) does not exist.' % pname)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            num_new_disks = len(disks)
            if (num_new_disks == 0):
                e_msg = ('List of disks in the input cannot be empty.')
                handle_exception(Exception(e_msg), request)
            dnames = [d.name for d in disks]
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            # free_percent = (usage[2]/usage[0]) * 100
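            # Multiplying before dividing (below) matters under Python 2
            # integer division: the form above truncates usage[2]/usage[0]
            # to 0 whenever free space is less than the total.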
            free_percent = (usage[2] * 100) / usage[0]
            threshold_percent = self.ADD_THRESHOLD * 100
            if (command == 'add'):
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                                 'because it belongs to another pool(%s)' %
                                 (d.name, pool.name, d.pool.name))
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                                 'past. If you really want to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui' % d.name)
                        handle_exception(Exception(e_msg), request)
                if (new_raid not in self.SUPPORTED_MIGRATIONS[pool.raid]):
                    e_msg = ('Pool migration from %s to %s is not supported.' %
                             (pool.raid, new_raid))
                    handle_exception(Exception(e_msg), request)

                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=r'(started|running)').exists()):
                    e_msg = ('A Balance process is already running for this '
                             'pool(%s). Resize is not supported during a '
                             'balance process.' % pool.name)
                    handle_exception(Exception(e_msg), request)

                if (free_percent < threshold_percent):
                    e_msg = ('Resize is only supported when there is at least '
                             '%d percent free space available. But currently '
                             'only %d percent is free. Remove some data and '
                             'try again.' % (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)

                if (new_raid != pool.raid):
                    if (((pool.raid in ('single', 'raid0'))
                         and new_raid in ('raid1', 'raid10'))):
                        cur_num_disks = num_total_disks - num_new_disks
                        if (num_new_disks < cur_num_disks):
                            e_msg = ('For single/raid0 to raid1/raid10 '
                                     'conversion, at least as many as present '
                                     'number of disks must be added. %d '
                                     'disks are provided, but at least %d are '
                                     'required.' %
                                     (num_new_disks, cur_num_disks))
                            handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames)
                balance_pid = balance_start(pool, mount_disk, convert=new_raid)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()

            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk(%s) cannot be removed because it does '
                                 'not belong to this Pool(%s)' %
                                 (d.name, pool.name))
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                if (pool.raid in (
                        'raid0',
                        'single',
                )):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid(%s) configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid in (
                        'raid5',
                        'raid6',
                )):
                    e_msg = ('Disk removal is not supported for pools with '
                             'raid5/6 configuration')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10'):
                    if (num_new_disks != 2):
                        e_msg = ('Only two disks can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 4):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 4 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)

                elif (pool.raid == 'raid1'):
                    if (num_new_disks != 1):
                        e_msg = ('Only one disk can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 2):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 2 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)

                threshold_percent = 100 - threshold_percent
                if (free_percent < threshold_percent):
                    e_msg = ('Removing disks is only supported when there is '
                             'at least %d percent free space available. But '
                             'currently only %d percent is free. Remove some '
                             'data and try again.' %
                             (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames, add=False)
                balance_pid = balance_start(pool, mount_disk)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = ('command(%s) is not supported.' % command)
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
Example #27
    def put(self, request, pid, command):
        """
        resize a pool.
        @pid: the pool's id
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - remount the pool, to apply changed mount options
                  'quotas' - request pool quota setting change
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except Exception:
                e_msg = 'Pool with id ({}) does not exist.'.format(pid)
                handle_exception(Exception(e_msg), request)

            if (pool.role == 'root' and command != 'quotas'):
                e_msg = ('Edit operations are not allowed on this pool ({}) '
                         'as it contains the operating '
                         'system.').format(pool.name)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            if (command == 'quotas'):
                # There is a pending btrfs change that allows for quota state
                # change on unmounted Volumes (pools).
                return self._quotas(request, pool)

            if not pool.is_mounted:
                e_msg = ('Pool member / raid edits require an active mount. '
                         'Please see the "Maintenance required" section.')
                handle_exception(Exception(e_msg), request)

            if command == 'remove' and \
                    request.data.get('disks', []) == ['missing']:
                disks = []
                logger.debug('Remove missing request skipping disk validation')
            else:
                disks = [self._validate_disk_id(diskId, request) for diskId in
                         request.data.get('disks', [])]

            num_disks_selected = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            new_raid = request.data.get('raid_level', pool.raid)

            if (command == 'add'):
                # Only attached disks can be selected during an add operation.
                num_total_attached_disks = (pool.disk_set.attached().count() +
                                            num_disks_selected)
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                                 'because it belongs to another pool ({})'
                                 '.').format(d.name, pool.name, d.pool.name)
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                                 'past. If you really want to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui.').format(d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid == 'single' and new_raid == 'raid10':
                    # TODO: Consider removing once we have better space calc.
                    # Avoid extreme raid level change upwards (space issues).
                    e_msg = ('Pool migration from {} to {} is not '
                             'supported.').format(pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid10' and num_total_attached_disks < 4):
                    e_msg = ('A minimum of 4 drives are required for the '
                             'raid level: raid10.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid6' and num_total_attached_disks < 3):
                    e_msg = ('A minimum of 3 drives are required for the '
                             'raid level: raid6.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid5' and num_total_attached_disks < 2):
                    e_msg = ('A minimum of 2 drives are required for the '
                             'raid level: raid5.')
                    handle_exception(Exception(e_msg), request)

                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=r'(started|running|cancelling|pausing|paused)').exists()):  # noqa E501
                    e_msg = ('A Balance process is already running or paused '
                             'for this pool ({}). Resize is not supported '
                             'during a balance process.').format(pool.name)
                    handle_exception(Exception(e_msg), request)

                # TODO: run resize_pool() as async task like start_balance()
                resize_pool(pool, dnames)  # None if no action
                force = False
                # During dev add we also offer raid level change, if selected
                # blanket apply '-f' to allow for reducing metadata integrity.
                if new_raid != pool.raid:
                    force = True
                tid = self._balance_start(pool, force=force, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()
            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks.')
                    handle_exception(Exception(e_msg), request)
                detached_disks_selected = 0
                for d in disks:  # to be removed
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk ({}) cannot be removed because it does '
                                 'not belong to this '
                                 'pool ({}).').format(d.name, pool.name)
                        handle_exception(Exception(e_msg), request)
                    if re.match('detached-', d.name) is not None:
                        detached_disks_selected += 1
                if detached_disks_selected >= 3:
                    # Artificial constraint but no current btrfs raid level yet
                    # allows for > 2 dev detached and we have a mounted vol.
                    e_msg = ('We currently only support removing two '
                             'detached disks at a time.')
                    handle_exception(Exception(e_msg), request)
                attached_disks_selected = (num_disks_selected -
                                           detached_disks_selected)
                remaining_attached_disks = (pool.disk_set.attached().count() -
                                            attached_disks_selected)
                if (pool.raid == 'raid0'):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid ({}) configuration.').format(pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid1' and remaining_attached_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid1) '
                             'requires a minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10' and remaining_attached_disks < 4):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid10) '
                             'requires a minimum of 4 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid5' and remaining_attached_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid5) requires a '
                             'minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid6' and remaining_attached_disks < 3):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid6) requires a '
                             'minimum of 3 disks.')
                    handle_exception(Exception(e_msg), request)

                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.size
                if size_cut >= (pool.size - usage):
                    e_msg = ('Removing disks ({}) may shrink the pool by '
                             '{} KB, which is greater than available free '
                             'space {} KB. This is '
                             'not supported.').format(dnames, size_cut, usage)
                    handle_exception(Exception(e_msg), request)

                # TODO: run resize_pool() as async task like start_balance(),
                # particularly important on device delete as it initiates an
                # internal volume balance which cannot be monitored by:
                # btrfs balance status.
                # See https://github.com/rockstor/rockstor-core/issues/1722
                # Hence we need also to add a 'DIY' status / percentage
                # reporting method.
                resize_pool(pool, dnames, add=False)  # None if no action
                # Unlike resize_pool() with add=True a delete has an implicit
                # balance where the deleted disks contents are re-distributed
                # across the remaining disks.

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = 'Command ({}) is not supported.'.format(command)
                handle_exception(Exception(e_msg), request)
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
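One subtlety in the shrink guard of Example #27: the condition size_cut >= (pool.size - usage) only matches its error text ("greater than available free space") if pool_usage() here returns used space, making pool.size - usage the free headroom. Toy numbers following that reading (hypothetical, in KB):

    # Illustration of the guard 'size_cut >= (pool.size - usage)'.
    pool_size = 1000  # pool.size: total capacity
    used = 200        # assumed meaning of the pool_usage() return value here
    size_cut = 300    # summed size of the disks selected for removal
    print(size_cut >= (pool_size - used))  # False: 300 < 800, removal allowed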
Example #28
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Sanitize our db entries in view of what we know we have attached.
        # A device's serial number is the only known externally unique entry;
        # scan_disks makes this so in the case of empty or repeat entries by
        # providing fake serial numbers, which are in turn flagged via the
        # WebUI as unreliable.
        # 1) scrub all device names with unique but nonsense uuid4
        # 2) mark all offline disks as such via db flag
        # 3) mark all offline disks' smart available and enabled flags as False
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (re.match(
                    'fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient, but just-scanned and so current, sda type
            # name to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # If these 2 strings are found then ignore them, as we then
                # overwrite with our current finding in the new json format.
                # I.e. a non None value could also be a legacy entry, so follow
                # the overwrite path when a legacy entry is found by treating
                # it as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now) so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via its label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                #this is for backwards compatibility. root pools created
                #before the pool.role migration need this. It can safely be
                #removed a few versions after 3.8-11 or when we reset migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label, and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable a device's smart support when it has a
                # fake serial number as ascribed by scan_disks, as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    do.save()  # persist flags as continue skips the save below
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception, e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
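The mdraid role handling above round-trips Disk.role between a JSON string and a dict, with None standing in for "no roles". A minimal standalone sketch of that pattern (the set_mdraid_role helper is hypothetical, and it ignores the legacy plain-string entries the code above special-cases):

import json

def set_mdraid_role(role_json, fstype):
    # role_json: current Disk.role value, a JSON object string or None.
    # fstype: scan_disks() finding, e.g. 'linux_raid_member', or None.
    roles = json.loads(role_json) if role_json is not None else {}
    if fstype in ('isw_raid_member', 'linux_raid_member'):
        roles['mdraid'] = str(fstype)  # create or update the mdraid entry
    else:
        roles.pop('mdraid', None)  # drop a stale mdraid role, keep the rest
    return json.dumps(roles) if roles else None  # None rather than '{}'

# set_mdraid_role(None, 'linux_raid_member')
#   -> '{"mdraid": "linux_raid_member"}'
# set_mdraid_role('{"mdraid": "linux_raid_member"}', None) -> None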
Example #29
    def put(self, request, pid, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - remount the pool, to apply changed mount options
                  'quotas' - request pool quota setting change
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except:
                e_msg = 'Pool with id ({}) does not exist.'.format(pid)
                handle_exception(Exception(e_msg), request)

            if (pool.role == 'root' and command != 'quotas'):
                e_msg = ('Edit operations are not allowed on this pool ({}) '
                         'as it contains the operating '
                         'system.').format(pool.name)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            if (command == 'quotas'):
                # There is a pending btrfs change that allows for quota state
                # change on unmounted Volumes (pools).
                return self._quotas(request, pool)

            if not pool.is_mounted:
                e_msg = ('Pool member / raid edits require an active mount. '
                         'Please see the "Maintenance required" section.')
                handle_exception(Exception(e_msg), request)

            if command == 'remove' and \
                    request.data.get('disks', []) == ['missing']:
                disks = []
                logger.debug(
                    'Remove missing request, so skipping disk validation')
            else:
                disks = [
                    self._validate_disk_id(diskId, request)
                    for diskId in request.data.get('disks', [])
                ]

            num_disks_selected = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            new_raid = request.data.get('raid_level', pool.raid)

            if (command == 'add'):
                # Only attached disks can be selected during an add operation.
                num_total_attached_disks = (pool.disk_set.attached().count() +
                                            num_disks_selected)
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                                 'because it belongs to another pool ({})'
                                 '.').format(d.name, pool.name, d.pool.name)
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui.').format(d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid == 'single' and new_raid == 'raid10':
                    # TODO: Consider removing once we have better space calc.
                    # Avoid extreme raid level change upwards (space issues).
                    e_msg = ('Pool migration from {} to {} is not '
                             'supported.').format(pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid10' and num_total_attached_disks < 4):
                    e_msg = ('A minimum of 4 drives are required for the '
                             'raid level: raid10.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid6' and num_total_attached_disks < 3):
                    e_msg = ('A minimum of 3 drives are required for the '
                             'raid level: raid6.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid5' and num_total_attached_disks < 2):
                    e_msg = ('A minimum of 2 drives are required for the '
                             'raid level: raid5.')
                    handle_exception(Exception(e_msg), request)

                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=
                        r'(started|running|cancelling|pausing|paused)').exists(
                        )):  # noqa E501
                    e_msg = ('A Balance process is already running or paused '
                             'for this pool ({}). Resize is not supported '
                             'during a balance process.').format(pool.name)
                    handle_exception(Exception(e_msg), request)

                # _resize_pool_start() add dev mode is quick so no async or tid
                self._resize_pool_start(pool, dnames)
                force = False
                # During dev add we also offer a raid level change; if
                # selected, blanket apply '-f' to allow for reducing metadata
                # integrity.
                if new_raid != pool.raid:
                    force = True
                # Django-ztask initialization as balance is long running.
                tid = self._balance_start(pool, force=force, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()
            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks.')
                    handle_exception(Exception(e_msg), request)
                detached_disks_selected = 0
                for d in disks:  # to be removed
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk ({}) cannot be removed because it does '
                                 'not belong to this '
                                 'pool ({}).').format(d.name, pool.name)
                        handle_exception(Exception(e_msg), request)
                    if re.match('detached-', d.name) is not None:
                        detached_disks_selected += 1
                if detached_disks_selected >= 3:
                    # Artificial constraint, but no current btrfs raid level
                    # allows for > 2 devs detached while retaining a mounted
                    # vol.
                    e_msg = ('We currently only support removing two '
                             'detached disks at a time.')
                    handle_exception(Exception(e_msg), request)
                attached_disks_selected = (num_disks_selected -
                                           detached_disks_selected)
                remaining_attached_disks = (pool.disk_set.attached().count() -
                                            attached_disks_selected)
                if (pool.raid == 'raid0'):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid ({}) configuration.').format(pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid1' and remaining_attached_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid1) '
                             'requires a minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10' and remaining_attached_disks < 4):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid10) '
                             'requires a minimum of 4 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid5' and remaining_attached_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid5) requires a '
                             'minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid6' and remaining_attached_disks < 3):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid6) requires a '
                             'minimum of 3 disks.')
                    handle_exception(Exception(e_msg), request)

                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.allocated
                if size_cut >= (pool.size - usage):
                    e_msg = ('Removing disks ({}) may shrink the pool by '
                             '{} KB, which is greater than available free '
                             'space {} KB. This is not '
                             'supported.').format(dnames, size_cut,
                                                  pool.size - usage)
                    handle_exception(Exception(e_msg), request)

                # Unlike resize_pool_start() with add=True, a remove has an
                # implicit balance where the removed disks' contents are
                # re-distributed across the remaining pool members.
                # This internal balance cannot currently be monitored by the
                # usual 'btrfs balance status /mnt_pt' command, so we have to
                # use our own mechanism to assess its status.
                # Django-ztask initialization:
                tid = self._resize_pool_start(pool, dnames, add=False)
                ps = PoolBalance(pool=pool, tid=tid, internal=True)
                ps.save()

                # Setting disk.pool = None for all removed members is redundant
                # as our next disk scan will re-find them until such time as
                # our async task, and its associated dev remove, has completed
                # its internal balance. This can take hours.

            else:
                e_msg = 'Command ({}) is not supported.'.format(command)
                handle_exception(Exception(e_msg), request)
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
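The remove branch above guards against shrinking the pool below its current usage: the summed allocation of the outgoing disks must stay under the pool's free space. A sketch of that guard in isolation, assuming pool.size, pool_usage() and disk.allocated all report KB:

def removal_would_overfill(pool_size_kb, used_kb, outgoing_allocations_kb):
    # The pool shrinks by roughly the space allocated on the removed
    # disks; refuse the removal if that exceeds the current free space.
    free_kb = pool_size_kb - used_kb
    return sum(outgoing_allocations_kb) >= free_kb

# removal_would_overfill(1000000, 800000, [150000, 100000]) -> True (refuse)
# removal_would_overfill(1000000, 200000, [150000, 100000]) -> False (allow)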
Example #30
 def pools_usage(self, last_ts):
     """
     This info is not from proc atm, but will eventually be.
     """
     #  collect usage only if the data is more than 30 seconds old
     now = time.mktime(time.gmtime())
     if (now - last_ts < 30):
         return last_ts
     ts = datetime.utcnow().replace(tzinfo=utc)
     for p in Pool.objects.all():
         total_reclaimable = 0
         try:
             #  get usage of all shares in this pool
             pool_device = Disk.objects.filter(pool=p)[0].name
             share_map = {}
             snap_map = {}
             for share in Share.objects.filter(pool=p):
                 share_map[share.qgroup] = share.name
                 for snap in Snapshot.objects.filter(share=share):
                     snap_map[snap.qgroup] = snap.real_name
             usaged = shares_usage(p, pool_device, share_map, snap_map)
             for s in usaged.keys():
                 try:
                     total_reclaimable += (
                         Share.objects.get(name=s).size - usaged[s][1])
                 except:
                     pass
                 su = None
                 try:
                     su = ShareUsage.objects.filter(name=s).latest('id')
                     if ((ts - su.ts).total_seconds() > 90):
                         su = None
                 except Exception, e:
                     e_msg = ('Unable to get latest share usage object '
                              'for share(%s). A new one will be created.'
                              % s)
                     logger.error(e_msg)
                 #  we check for changes in both referenced and exclusive
                 #  usage because in rare cases it's possible for only one
                 #  to change.
                 if ((su is None or su.r_usage != usaged[s][0] or
                      su.e_usage != usaged[s][1])):
                     su = ShareUsage(name=s, r_usage=usaged[s][0],
                                     e_usage=usaged[s][1], ts=ts)
                 else:
                     su.ts = ts
                     su.count = su.count + 1
                 self._save_wrapper(su)
         except Exception, e:
             logger.debug('command exception while getting shares usage '
                          'for pool: %s' % (p.name))
             logger.exception(e)
         try:
             usage = pool_usage('/%s/%s' % (settings.MNT_PT, p.name))
             total_free = usage[2]  # free + reclaimable
             pu = None
             try:
                 pu = PoolUsage.objects.filter(pool=p.name).latest('id')
                 if ((ts - pu.ts).total_seconds() > 90):
                     pu = None
             except Exception, e:
                 e_msg = ('Unable to get latest pool usage object for '
                          'pool(%s). A new one will be created.' % p.name)
                 logger.error(e_msg)
             if ((pu is None or
                  p.size - (pu.free + pu.reclaimable) != usage[1])):
                 pu = PoolUsage(pool=p.name,
                                free=total_free-total_reclaimable,
                                reclaimable=total_reclaimable, ts=ts)
             else:
                 pu.ts = ts
                 pu.count = pu.count + 1
             self._save_wrapper(pu)
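Both the share and pool loops above use one sampling pattern: fetch the latest db row, discard it as stale if older than 90 seconds, then either bump its count (value unchanged) or create a fresh row (value changed). A condensed sketch of that pattern, with a generic row interface standing in for the Django model calls:

from datetime import timedelta

STALE_AFTER = timedelta(seconds=90)

def record_sample(latest_row, new_value, ts, make_row, save):
    # latest_row: most recent stored sample, or None if there is none.
    # make_row: hypothetical constructor for a fresh sample row.
    if latest_row is not None and (ts - latest_row.ts) > STALE_AFTER:
        latest_row = None  # too old to extend; force a new row
    if latest_row is None or latest_row.value != new_value:
        row = make_row(value=new_value, ts=ts)  # value changed: new row
    else:
        latest_row.ts = ts
        latest_row.count += 1  # same value seen again; extend the run
        row = latest_row
    save(row)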
Example #31
 def _create_root_pool(self, d):
     p = Pool(name=settings.ROOT_POOL, raid='single')
     p.size = pool_usage(mount_root(p, d.name))[0]
     enable_quota(p, '/dev/%s' % d.name)
     p.uuid = btrfs_uuid(d.name)
     return p
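From the call sites in these examples, pool_usage() appears to follow two conventions depending on vintage; a hedged summary, inferred from usage here rather than verified against the btrfs helper module:

# Older call sites unpack a 3-tuple of KB values, apparently (size, used,
# free), where free includes reclaimable space:
size, used, free = pool_usage('/mnt2/mypool')  # mount point is illustrative
# size seeds Pool.size; used feeds PoolUsage; usage[2] is free + reclaimable.

# Newer call sites (Examples #29, #35 and #38) treat the return as a single
# used-space figure and derive free space as pool.size - pool_usage(mnt_pt).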
Example #32
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            pname = request.data['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = (
                    'Invalid characters in Pool name. Following '
                    'characters are allowed: letter(a-z or A-Z), digit(0-9), '
                    'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (len(pname) > 255):
                e_msg = ('Pool name must be less than 255 characters')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool(%s) already exists. Choose a different name' %
                         pname)
                handle_exception(Exception(e_msg), request)

            if (Share.objects.filter(name=pname).exists()):
                e_msg = (
                    'A Share with this name(%s) exists. Pool and Share names '
                    'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.' %
                             d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: {}'.format(
                    self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid1 disk check
            if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname,
                     raid=raid_level,
                     compression=compression,
                     mnt_options=mnt_options)
            p.disk_set.add(*disks)
            p.save()
            # added for loop to save disks
            # appears p.disk_set.add(*disks) was not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            return Response(PoolInfoSerializer(p).data)
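The per-level disk-count checks above index into self.RAID_LEVELS positionally, which is easy to misread. A table-driven equivalent is easier to audit; a sketch assuming the same minimums (the MIN_DISKS mapping is illustrative, not from the codebase):

MIN_DISKS = {'single': 1, 'raid0': 2, 'raid1': 2,
             'raid10': 4, 'raid5': 2, 'raid6': 3}

def validate_disk_count(raid_level, num_disks):
    if raid_level not in MIN_DISKS:
        raise ValueError('Unsupported raid level. use one of: %s'
                         % sorted(MIN_DISKS))
    if num_disks < MIN_DISKS[raid_level]:
        raise ValueError('A minimum of %d drives are required for the raid '
                         'level: %s' % (MIN_DISKS[raid_level], raid_level))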
Example #33
            try:
                auto_update(enable=False)
                return Response({'enabled': False, })
            except Exception, e:
                msg = ('Failed to disable auto update due to this exception:  '
                       '%s' % e.__str__())
                handle_exception(Exception(msg), request)

        if (command == 'refresh-pool-state'):
            for p in Pool.objects.all():
                fd = p.disk_set.first()
                if (fd is None):
                    p.delete()
                    continue  # no member disk to query; skip this pool
                mount_root(p)
                pool_info = get_pool_info(fd.name)
                p.name = pool_info['label']
                p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
                p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
                p.save()
            return Response()

        if (command == 'refresh-share-state'):
            for p in Pool.objects.all():
                import_shares(p, request)
            return Response()

        if (command == 'refresh-snapshot-state'):
            for share in Share.objects.all():
                import_snapshots(share)
            return Response()
Example #34
                return Response({
                    'enabled': False,
                })
            except Exception, e:
                msg = ('Failed to disable auto update due to this exception:  '
                       '%s' % e.__str__())
                handle_exception(Exception(msg), request)

        if (command == 'refresh-pool-state'):
            for p in Pool.objects.all():
                fd = p.disk_set.first()
                if (fd is None):
                    p.delete()
                    continue  # no member disk to query; skip this pool
                mount_root(p)
                pool_info = get_pool_info(fd.name)
                p.name = pool_info['label']
                p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
                p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
                p.save()
            return Response()

        if (command == 'refresh-share-state'):
            for p in Pool.objects.all():
                import_shares(p, request)
            return Response()

        if (command == 'refresh-snapshot-state'):
            for share in Share.objects.all():
                import_snapshots(share)
            return Response()
Example #35
    def put(self, request, pid, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except:
                e_msg = ('Pool(%d) does not exist.' % pid)
                handle_exception(Exception(e_msg), request)

            if (pool.role == 'root'):
                e_msg = ('Edit operations are not allowed on this Pool(%d) '
                         'as it contains the operating system.' % pid)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks', [])
            ]
            num_new_disks = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            if (command == 'add'):
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                                 'because it belongs to another pool(%s)' %
                                 (d.name, pool.name, d.pool.name))
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui' % d.name)
                        handle_exception(Exception(e_msg), request)

                if (pool.raid != 'single' and new_raid == 'single'):
                    e_msg = ('Pool migration from %s to %s is not supported.' %
                             (pool.raid, new_raid))
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid10' and num_total_disks < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: raid10')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid6' and num_total_disks < 3):
                    e_msg = ('A minimum of Three drives are required for the '
                             'raid level: raid6')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid5' and num_total_disks < 2):
                    e_msg = ('A minimum of Two drives are required for the '
                             'raid level: raid5')
                    handle_exception(Exception(e_msg), request)

                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=
                        r'(started|running|cancelling|pausing|paused)').exists(
                        )):  # noqa E501
                    e_msg = ('A Balance process is already running or paused '
                             'for this pool(%s). Resize is not supported '
                             'during a balance process.' % pool.name)
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames)
                tid = self._balance_start(pool, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()
            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk(%s) cannot be removed because it does '
                                 'not belong to this Pool(%s)' %
                                 (d.name, pool.name))
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                if (pool.raid == 'raid0'):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid(%s) configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid1' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration(raid1) '
                             'requires a minimum of 2 disks')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10' and remaining_disks < 4):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration(raid10) '
                             'requires a minimum of 4 disks')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid5' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration(raid5) requires a '
                             'minimum of 2 disks')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid6' and remaining_disks < 3):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration(raid6) requires a '
                             'minimum of 3 disks')
                    handle_exception(Exception(e_msg), request)

                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.size
                if size_cut >= (pool.size - usage):
                    e_msg = ('Removing these(%s) disks may shrink the pool by '
                             '%dKB, which is greater than available free space'
                             ' %dKB. This is not supported.' %
                             (dnames, size_cut, pool.size - usage))
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames, add=False)
                tid = self._balance_start(pool)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = ('command(%s) is not supported.' % command)
                handle_exception(Exception(e_msg), request)
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
Example #36
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            if pname == settings.ROOT_POOL:
                e_msg = (
                    "Edit operations are not allowed on this Pool(%s) " "as it contains the operating system." % pname
                )
                handle_exception(Exception(e_msg), request)
            try:
                pool = Pool.objects.get(name=pname)
            except:
                e_msg = "Pool(%s) does not exist." % pname
                handle_exception(Exception(e_msg), request)

            if command == "remount":
                return self._remount(request, pool)

            disks = [self._validate_disk(d, request) for d in request.data.get("disks")]
            num_new_disks = len(disks)
            if num_new_disks == 0:
                e_msg = "List of disks in the input cannot be empty."
                handle_exception(Exception(e_msg), request)
            dnames = [d.name for d in disks]
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            new_raid = request.data.get("raid_level", pool.raid)
            num_total_disks = Disk.objects.filter(pool=pool).count() + num_new_disks
            usage = pool_usage("/%s/%s" % (settings.MNT_PT, pool.name))
            # free_percent = (usage[2]/usage[0]) * 100
            free_percent = (usage[2] * 100) / usage[0]
            threshold_percent = self.ADD_THRESHOLD * 100
            if command == "add":
                for d in disks:
                    if d.pool is not None:
                        e_msg = (
                            "Disk(%s) cannot be added to this Pool(%s) "
                            "because it belongs to another pool(%s)" % (d.name, pool.name, d.pool.name)
                        )
                        handle_exception(Exception(e_msg), request)
                    if d.btrfs_uuid is not None:
                        e_msg = (
                            "Disk(%s) has a BTRFS filesystem from the "
                            "past. If you really like to add it, wipe it "
                            "from the Storage -> Disks screen of the "
                            "web-ui" % d.name
                        )
                        handle_exception(Exception(e_msg), request)
                if new_raid not in self.SUPPORTED_MIGRATIONS[pool.raid]:
                    e_msg = "Pool migration from %s to %s is not supported." % (pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                if PoolBalance.objects.filter(pool=pool, status__regex=r"(started|running)").exists():
                    e_msg = (
                        "A Balance process is already running for this "
                        "pool(%s). Resize is not supported during a "
                        "balance process." % pool.name
                    )
                    handle_exception(Exception(e_msg), request)

                if free_percent < threshold_percent:
                    e_msg = (
                        "Resize is only supported when there is at least "
                        "%d percent free space available. But currently "
                        "only %d percent is free. Remove some data and "
                        "try again." % (threshold_percent, free_percent)
                    )
                    handle_exception(Exception(e_msg), request)

                if new_raid != pool.raid:
                    if (pool.raid in ("single", "raid0")) and new_raid in ("raid1", "raid10"):
                        cur_num_disks = num_total_disks - num_new_disks
                        if num_new_disks < cur_num_disks:
                            e_msg = (
                                "For single/raid0 to raid1/raid10 "
                                "conversion, at least as many as present "
                                "number of disks must be added. %d "
                                "disks are provided, but at least %d are "
                                "required." % (num_new_disks, cur_num_disks)
                            )
                            handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames)
                balance_pid = balance_start(pool, mount_disk, convert=new_raid)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()

            elif command == "remove":
                if new_raid != pool.raid:
                    e_msg = "Raid configuration cannot be changed while " "removing disks"
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if d.pool is None or d.pool != pool:
                        e_msg = "Disk(%s) cannot be removed because it does " "not belong to this Pool(%s)" % (
                            d.name,
                            pool.name,
                        )
                        handle_exception(Exception(e_msg), request)
                remaining_disks = Disk.objects.filter(pool=pool).count() - num_new_disks
                if pool.raid in ("raid0", "single"):
                    e_msg = "Disks cannot be removed from a pool with this " "raid(%s) configuration" % pool.raid
                    handle_exception(Exception(e_msg), request)

                if pool.raid in ("raid5", "raid6"):
                    e_msg = "Disk removal is not supported for pools with " "raid5/6 configuration"
                    handle_exception(Exception(e_msg), request)

                if pool.raid == "raid10":
                    if num_new_disks != 2:
                        e_msg = (
                            "Only two disks can be removed at once from "
                            "this pool because of its raid "
                            "configuration(%s)" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)
                    elif remaining_disks < 4:
                        e_msg = (
                            "Disks cannot be removed from this pool "
                            "because its raid configuration(%s) "
                            "requires a minimum of 4 disks" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)

                elif pool.raid == "raid1":
                    if num_new_disks != 1:
                        e_msg = (
                            "Only one disk can be removed at once from "
                            "this pool because of its raid "
                            "configuration(%s)" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)
                    elif remaining_disks < 2:
                        e_msg = (
                            "Disks cannot be removed from this pool "
                            "because its raid configuration(%s) "
                            "requires a minimum of 2 disks" % pool.raid
                        )
                        handle_exception(Exception(e_msg), request)

                threshold_percent = 100 - threshold_percent
                if free_percent < threshold_percent:
                    e_msg = (
                        "Removing disks is only supported when there is "
                        "at least %d percent free space available. But "
                        "currently only %d percent is free. Remove some "
                        "data and try again." % (threshold_percent, free_percent)
                    )
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames, add=False)
                balance_pid = balance_start(pool, mount_disk)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = "command(%s) is not supported." % command
                handle_exception(Exception(e_msg), request)
            usage = pool_usage("/%s/%s" % (settings.MNT_PT, pool.name))
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
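A note on the free_percent arithmetic in this example: under Python 2 (which the "except Exception, e" syntax dates these snippets to), dividing two ints truncates, so the commented-out usage[2]/usage[0] * 100 form would yield 0 for any pool that is not entirely free. Multiplying by 100 first keeps the integer maths meaningful:

# Python 2 integer division semantics:
free_kb, size_kb = 350, 1000
free_kb / size_kb * 100    # -> 0 (350/1000 truncates to 0 before scaling)
(free_kb * 100) / size_kb  # -> 35 (scale first, then divide)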
Example #37
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [self._validate_disk(d, request) for d in
                     request.data.get('disks')]
            pname = request.data['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with an alphanumeric(a-z0-9) '
                         'character and can be followed by any of the '
                         'following characters: letter(a-z), digits(0-9), '
                         'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (len(pname) > 255):
                e_msg = ('Pool name must be less than 255 characters')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool(%s) already exists. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            if (Share.objects.filter(name=pname).exists()):
                e_msg = ('A Share with this name(%s) exists. Pool and Share names '
                         'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.'
                             % d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: {}'.format(self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid1 disk check
            if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname, raid=raid_level, compression=compression,
                     mnt_options=mnt_options)
            p.disk_set.add(*disks)
            p.save()
            # added for loop to save disks
            # appears p.disk_set.add(*disks) was not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            return Response(PoolInfoSerializer(p).data)
Example #38
    def put(self, request, pid, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except:
                e_msg = 'Pool with id ({}) does not exist.'.format(pid)
                handle_exception(Exception(e_msg), request)

            if (pool.role == 'root' and command != 'quotas'):
                e_msg = ('Edit operations are not allowed on this pool ({}) '
                         'as it contains the operating '
                         'system.').format(pool.name)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            if (command == 'quotas'):
                return self._quotas(request, pool)

            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks', [])
            ]
            num_new_disks = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            if (command == 'add'):
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk ({}) cannot be added to this pool ({}) '
                                 'because it belongs to another pool ({})'
                                 '.').format(d.name, pool.name, d.pool.name)
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk ({}) has a BTRFS filesystem from the '
                                 'past. If you really like to add it, wipe it '
                                 'from the Storage -> Disks screen of the '
                                 'web-ui.').format(d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid == 'single' and new_raid == 'raid10':
                    # TODO: Consider removing once we have better space calc.
                    # Avoid extreme raid level change upwards (space issues).
                    e_msg = ('Pool migration from {} to {} is not '
                             'supported.').format(pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid10' and num_total_disks < 4):
                    e_msg = ('A minimum of 4 drives are required for the '
                             'raid level: raid10.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid6' and num_total_disks < 3):
                    e_msg = ('A minimum of 3 drives are required for the '
                             'raid level: raid6.')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid5' and num_total_disks < 2):
                    e_msg = ('A minimum of 2 drives are required for the '
                             'raid level: raid5.')
                    handle_exception(Exception(e_msg), request)

                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=
                        r'(started|running|cancelling|pausing|paused)').exists(
                        )):  # noqa E501
                    e_msg = ('A Balance process is already running or paused '
                             'for this pool ({}). Resize is not supported '
                             'during a balance process.').format(pool.name)
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames)
                # During dev add we also offer a raid level change; if
                # selected, blanket apply '-f' to allow for reducing metadata
                # integrity.
                force = False
                if new_raid != pool.raid:
                    force = True
                tid = self._balance_start(pool, force=force, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()
            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks.')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk ({}) cannot be removed because it does '
                                 'not belong to this '
                                 'pool ({}).').format(d.name, pool.name)
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                if (pool.raid == 'raid0'):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid ({}) configuration.').format(pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid1' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid1) '
                             'requires a minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10' and remaining_disks < 4):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration (raid10) '
                             'requires a minimum of 4 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid5' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid5) requires a '
                             'minimum of 2 disks.')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid6' and remaining_disks < 3):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration (raid6) requires a '
                             'minimum of 3 disks.')
                    handle_exception(Exception(e_msg), request)

                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.size
                if size_cut >= (pool.size - usage):
                    e_msg = ('Removing disks ({}) may shrink the pool by '
                             '{} KB, which is greater than available free '
                             'space {} KB. This is not '
                             'supported.').format(dnames, size_cut,
                                                  pool.size - usage)
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames, add=False)
                tid = self._balance_start(pool)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = 'Command ({}) is not supported.'.format(command)
                handle_exception(Exception(e_msg), request)
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
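A note on the shrink-safety guard above: removal is refused when the capacity lost by the selected disks meets or exceeds the pool's current free space. A minimal standalone sketch of that check, with hypothetical names and values (not from the source):

def shrink_is_safe(pool_size_kb, used_kb, removed_disk_sizes_kb):
    # Free space currently available in the pool.
    available_free = pool_size_kb - used_kb
    # Capacity the pool would lose if these disks were removed.
    size_cut = sum(removed_disk_sizes_kb)
    return size_cut < available_free

# e.g. shrink_is_safe(1000000, 400000, [300000, 200000]) -> True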
Example #39
    def put(self, request, pid, command):
        """
        resize a pool.
        @pid: pool's id
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
                  'remount' - remount the pool, to apply changed mount options
                  'quotas' - request pool quota setting change
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(id=pid)
            except:
                e_msg = "Pool with id ({}) does not exist.".format(pid)
                handle_exception(Exception(e_msg), request)

            if pool.role == "root" and command != "quotas":
                e_msg = ("Edit operations are not allowed on this pool ({}) "
                         "as it contains the operating "
                         "system.").format(pool.name)
                handle_exception(Exception(e_msg), request)

            if command == "remount":
                return self._remount(request, pool)

            if command == "quotas":
                # There is a pending btrfs change that allows for quota state
                # change on unmounted Volumes (pools).
                return self._quotas(request, pool)

            # Establish missing and detached disk removal request flag defaults:
            remove_missing_disk_request = False
            all_members_detached = False
            if command == "remove" and request.data.get("disks",
                                                        []) == ["missing"]:
                remove_missing_disk_request = True
            if (pool.disk_set.filter(name__startswith="detached-").count() ==
                    pool.disk_set.count()):
                all_members_detached = True

            if not pool.is_mounted:
                # If we are asked to remove the last disk in a pool and it's
                # detached, then the user has already been notified not to
                # remove it if it's to be re-attached. So skip our mount
                # exception, as a mount is not possible anyway unless
                # re-attached, and we have already indicated that possible
                # path. all_members_detached accounts for all pool members
                # being in the detached state.
                if all_members_detached:
                    logger.info(
                        "Skipping mount requirement: all pool members are detached."
                    )
                else:
                    e_msg = (
                        "Pool member / raid edits require an active mount. "
                        'Please see the "Maintenance required" section.')
                    handle_exception(Exception(e_msg), request)

            if remove_missing_disk_request:
                disks = []
                logger.debug(
                    "'Remove missing' disk request: skipping disk validation")
            else:
                disks = [
                    self._validate_disk_id(diskId, request)
                    for diskId in request.data.get("disks", [])
                ]

            num_disks_selected = len(disks)
            dnames = self._role_filter_disk_names(disks, request)
            new_raid = request.data.get("raid_level", pool.raid)

            if command == "add":
                # Only attached disks can be selected during an add operation.
                num_total_attached_disks = (pool.disk_set.attached().count() +
                                            num_disks_selected)
                for d in disks:
                    if d.pool is not None:
                        e_msg = ("Disk ({}) cannot be added to this pool ({}) "
                                 "because it belongs to another pool ({})"
                                 ".").format(d.name, pool.name, d.pool.name)
                        handle_exception(Exception(e_msg), request)
                    if d.btrfs_uuid is not None:
                        e_msg = ("Disk ({}) has a BTRFS filesystem from the "
                                 "past. If you really like to add it, wipe it "
                                 "from the Storage -> Disks screen of the "
                                 "web-ui.").format(d.name)
                        handle_exception(Exception(e_msg), request)

                if pool.raid == "single" and new_raid == "raid10":
                    # TODO: Consider removing once we have better space calc.
                    # Avoid extreme raid level change upwards (space issues).
                    e_msg = ("Pool migration from {} to {} is not supported."
                             ).format(pool.raid, new_raid)
                    handle_exception(Exception(e_msg), request)

                if new_raid == "raid10" and num_total_attached_disks < 4:
                    e_msg = ("A minimum of 4 drives are required for the "
                             "raid level: raid10.")
                    handle_exception(Exception(e_msg), request)

                if new_raid == "raid6" and num_total_attached_disks < 3:
                    e_msg = ("A minimum of 3 drives are required for the "
                             "raid level: raid6.")
                    handle_exception(Exception(e_msg), request)

                if new_raid == "raid5" and num_total_attached_disks < 2:
                    e_msg = ("A minimum of 2 drives are required for the "
                             "raid level: raid5.")
                    handle_exception(Exception(e_msg), request)

                if PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=
                        r"(started|running|cancelling|pausing|paused)",
                ).exists():  # noqa E501
                    e_msg = ("A Balance process is already running or paused "
                             "for this pool ({}). Resize is not supported "
                             "during a balance process.").format(pool.name)
                    handle_exception(Exception(e_msg), request)

                # _resize_pool_start() in add-dev mode is quick, so no
                # async task or tid is needed.
                self._resize_pool_start(pool, dnames)
                force = False
                # During a dev add we also offer a raid level change; if
                # selected, blanket-apply '-f' to allow for reducing metadata
                # integrity.
                if new_raid != pool.raid:
                    force = True
                # Django-ztask initialization as balance is long running.
                tid = self._balance_start(pool, force=force, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                # Now we ensure udev info is updated via system wide trigger
                trigger_udev_update()

            elif command == "remove":
                if new_raid != pool.raid:
                    e_msg = "Raid configuration cannot be changed while removing disks."
                    handle_exception(Exception(e_msg), request)
                detached_disks_selected = 0
                for d in disks:  # to be removed
                    if d.pool is None or d.pool != pool:
                        e_msg = ("Disk ({}) cannot be removed because it does "
                                 "not belong to this "
                                 "pool ({}).").format(d.name, pool.name)
                        handle_exception(Exception(e_msg), request)
                    if re.match("detached-", d.name) is not None:
                        detached_disks_selected += 1
                if detached_disks_selected >= 2:
                    # We translate the removal of a detached device into:
                    # "btrfs device delete missing mnt_pt"
                    # but only when appropriate: this removes the first
                    # 'missing' dev. A detached disk is not necessarily
                    # missing, but an indication of prior pool association.
                    e_msg = (
                        "Detached disk selection is limited to a single device. "
                        "If all Pool members are detached, all will be removed "
                        "and their pool automatically deleted thereafter.")
                    handle_exception(Exception(e_msg), request)
                attached_disks_selected = num_disks_selected - detached_disks_selected
                remaining_attached_disks = (pool.disk_set.attached().count() -
                                            attached_disks_selected)
                # Add check for attempt to remove detached & attached disks concurrently
                if detached_disks_selected > 0 and attached_disks_selected > 0:
                    e_msg = (
                        "Mixed detached and attached disk selection is "
                        "not supported. Limit your selection to only attached "
                        "disks, or a single detached disk.")
                    handle_exception(Exception(e_msg), request)
                # Skip all further sanity checks when all members are detached.
                if not all_members_detached:
                    if pool.raid == "raid0":
                        e_msg = (
                            "Disks cannot be removed from a pool with this "
                            "raid ({}) configuration.").format(pool.raid)
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid1" and remaining_attached_disks < 2:
                        e_msg = ("Disks cannot be removed from this pool "
                                 "because its raid configuration (raid1) "
                                 "requires a minimum of 2 disks.")
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid10" and remaining_attached_disks < 4:
                        e_msg = ("Disks cannot be removed from this pool "
                                 "because its raid configuration (raid10) "
                                 "requires a minimum of 4 disks.")
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid5" and remaining_attached_disks < 2:
                        e_msg = (
                            "Disks cannot be removed from this pool because "
                            "its raid configuration (raid5) requires a "
                            "minimum of 2 disks.")
                        handle_exception(Exception(e_msg), request)

                    if pool.raid == "raid6" and remaining_attached_disks < 3:
                        e_msg = (
                            "Disks cannot be removed from this pool because "
                            "its raid configuration (raid6) requires a "
                            "minimum of 3 disks.")
                        handle_exception(Exception(e_msg), request)

                    usage = pool_usage("/{}/{}".format(settings.MNT_PT,
                                                       pool.name))
                    size_cut = 0
                    for d in disks:  # to be removed
                        size_cut += d.allocated
                    available_free = pool.size - usage
                    if size_cut >= available_free:
                        e_msg = ("Removing disks ({}) may shrink the pool by "
                                 "{} KB, which is greater than available free "
                                 "space {} KB. This is "
                                 "not supported.").format(
                                     dnames, size_cut, available_free)
                        handle_exception(Exception(e_msg), request)

                    # Unlike _resize_pool_start() with add=True, a remove has
                    # an implicit balance where the removed disks' contents
                    # are re-distributed across the remaining pool members.
                    # This internal balance cannot currently be monitored by
                    # the usual 'btrfs balance status /mnt_pt' command, so we
                    # have to use our own mechanism to assess its status.
                    # Django-ztask initialization:
                    tid = self._resize_pool_start(pool, dnames, add=False)
                    ps = PoolBalance(pool=pool, tid=tid, internal=True)
                    ps.save()
                    # Setting disk.pool = None for all removed members is
                    # redundant, as our next disk scan will re-find them until
                    # such time as our async task, and its associated dev
                    # remove, has completed its internal balance. This can
                    # take hours. The db-only event of all_members_detached is
                    # the exception.

                else:  # all_members_detached
                    # If all members are detached then delete pool associations for all.
                    # We cannot mount and so cannot perform any resize or any further
                    # pool member validation anyway.
                    # N.B. on next pool refresh, no members leads to pool removal.
                    for d in pool.disk_set.all():
                        d.pool = None
                        d.save()

            else:
                e_msg = "Command ({}) is not supported.".format(command)
                handle_exception(Exception(e_msg), request)
            pool.size = pool.usage_bound()
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
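A note on the balance guard above: the handler refuses a resize while any PoolBalance record matches a status regex. A minimal sketch of the same test outside Django's ORM, using the status values from the handler's own regex:

import re

# States in which a resize must be refused, per the handler's regex.
ACTIVE_BALANCE = re.compile(r'(started|running|cancelling|pausing|paused)')

def balance_blocks_resize(status):
    # Django's status__regex lookup is unanchored, hence search().
    return ACTIVE_BALANCE.search(status) is not None

# e.g. balance_blocks_resize('paused') -> True, 'finished' -> False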
Example #40
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [self._validate_disk(d, request) for d in request.data.get("disks")]
            pname = request.data["pname"]
            if re.match("%s$" % settings.POOL_REGEX, pname) is None:
                e_msg = (
                    "Pool name must start with an alphanumeric(a-z0-9) "
                    "character and can be followed by any of the "
                    "following characters: letter(a-z), digits(0-9), "
                    "hyphen(-), underscore(_) or a period(.)."
                )
                handle_exception(Exception(e_msg), request)

            if Pool.objects.filter(name=pname).exists():
                e_msg = "Pool(%s) already exists. Choose a different name" % pname
                handle_exception(Exception(e_msg), request)

            if Share.objects.filter(name=pname).exists():
                e_msg = (
                    "A Share with this name(%s) exists. Pool and Share names "
                    "must be distinct. Choose a different name." % pname
                )
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if d.btrfs_uuid is not None:
                    e_msg = (
                        "Another BTRFS filesystem exists on this " "disk(%s). Erase the disk and try again." % d.name
                    )
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data["raid_level"]
            if raid_level not in self.RAID_LEVELS:
                e_msg = "Unsupported raid level. use one of: {}".format(self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid1 disk check
            if raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1:
                e_msg = "At least two disks are required for the raid level: " "%s" % raid_level
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[3]:
                if len(disks) < 4:
                    e_msg = "A minimum of Four drives are required for the " "raid level: %s" % raid_level
                    handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[4] and len(disks) < 2:
                e_msg = "Two or more disks are required for the raid " "level: %s" % raid_level
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[5] and len(disks) < 3:
                e_msg = "Three or more disks are required for the raid " "level: %s" % raid_level
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname, raid=raid_level, compression=compression, mnt_options=mnt_options)
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p, dnames[0]))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.disk_set.add(*disks)
            p.save()
            # Added a for loop to save disks; it appears p.disk_set.add(*disks)
            # was not saving disks in the test environment.
            for d in disks:
                d.pool = p
                d.save()
            return Response(PoolInfoSerializer(p).data)
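A note on the name validation above: re.match() anchors the pattern at the start, and the appended '$' anchors it at the end, so POOL_REGEX must match the whole name. A standalone sketch, assuming a pattern consistent with the error message (the real settings.POOL_REGEX value is not shown in these examples):

import re

# Assumed pattern, reconstructed from the error message wording: start
# alphanumeric (a-z0-9), then letters, digits, hyphen, underscore or period.
POOL_REGEX = r'[a-z0-9][-_.a-z0-9]*'

def valid_pool_name(pname):
    return re.match('%s$' % POOL_REGEX, pname) is not None

# e.g. valid_pool_name('tank-01') -> True; valid_pool_name('-tank') -> False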
Example #41
 def _create_root_pool(self, d):
     p = Pool(name=settings.ROOT_POOL, raid='single')
     p.size = pool_usage(mount_root(p, d.name))[0]
     enable_quota(p, '/dev/%s' % d.name)
     p.uuid = btrfs_uuid(d.name)
     return p
Example #42
 def _update_disk_state():
     """
     A db atomic method to update the database of attached disks / drives.
     Works only on device serial numbers for drive identification.
     Calls scan_disks to establish the current connected drives info.
     Initially removes duplicate by serial number db entries to deal
     with legacy db states and obfuscates all previous device names as they
     are transient. The drive database is then updated with the attached
     disks info and previously known drives no longer found attached are
     marked as offline. All offline drives have their SMART availability and
     activation status removed and all attached drives have their SMART
     availability assessed and activated if available.
     :return: serialized models of attached and missing disks via serial num
     """
     # Acquire a list (namedtuple collection) of attached drives > min size
     disks = scan_disks(settings.MIN_DISK_SIZE)
     serial_numbers_seen = []
     # Sanitize our db entries in view of what we know we have attached.
     # The device serial number is the only known external unique entry;
     # scan_disks makes this so in the case of empty or repeat entries by
     # providing fake serial numbers, which are in turn flagged via the
     # WebUI as unreliable.
     # 1) scrub all device names with a unique but nonsense uuid4
     # 2) mark all offline disks as such via a db flag
     # 3) mark all offline disks' smart available and enabled flags as False
     logger.info('update_disk_state() Called')
     for do in Disk.objects.all():
         # Replace all device names with a unique placeholder on each scan
         # N.B. do not optimize by re-using uuid index as this could lead
         # to a non refreshed webui acting upon an entry that is different
         # from that shown to the user.
         do.name = str(uuid.uuid4()).replace('-', '')  # 32 chars long
         # Delete duplicate or fake by serial number db disk entries.
         # It makes no sense to save fake serial number drives between scans
         # as on each scan the serial number is re-generated anyway.
         if (do.serial in serial_numbers_seen) or (len(do.serial) == 48):
             logger.info('Deleting duplicate or fake (by serial) Disk db '
                         'entry. Serial = %s' % do.serial)
             do.delete()  # django >=1.9 returns a dict of deleted items.
             # Continue onto next db disk object as nothing more to process.
             continue
         # first encounter of this serial in the db so stash it for reference
         serial_numbers_seen.append(deepcopy(do.serial))
         # Look for devices (by serial number) that are in the db but not in
         # our disk scan, ie offline / missing.
         if (do.serial not in [d.serial for d in disks]):
             # update the db entry as offline
             do.offline = True
             # disable S.M.A.R.T available and enabled flags.
             do.smart_available = do.smart_enabled = False
         do.save()  # make sure all updates are flushed to db
     # Our db now has no device name info as all dev names are placeholders.
     # Iterate over attached drives to update the db's knowledge of them.
     # Kernel dev names are unique so safe to overwrite our db unique name.
     for d in disks:
         # start with an empty disk object
         dob = None
         # If the db has an entry with this disk's serial number then
         # use this db entry and update the device name from our recent scan.
         if (Disk.objects.filter(serial=d.serial).exists()):
             dob = Disk.objects.get(serial=d.serial)
             dob.name = d.name
         else:
             # We have an assumed new disk entry as no serial match in db.
             # Build a new entry for this disk.
             dob = Disk(name=d.name, serial=d.serial)
         # Update the db disk object (existing or new) with our scanned info
         dob.size = d.size
         dob.parted = d.parted
         dob.offline = False  # as we are iterating over attached devices
         dob.model = d.model
         dob.transport = d.transport
         dob.vendor = d.vendor
         dob.btrfs_uuid = d.btrfs_uuid
         # If attached disk has an fs and it isn't btrfs
         if (d.fstype is not None and d.fstype != 'btrfs'):
             dob.btrfs_uuid = None
             dob.parted = True
         # If our existing Pool db knows of this disk's pool via its label:
         if (Pool.objects.filter(name=d.label).exists()):
             # update the disk db object's pool field accordingly.
             dob.pool = Pool.objects.get(name=d.label)
         else:  # this disk is not known to exist in any pool via its label
             dob.pool = None
         # If no pool has yet been found with this disk's label, and
         # the attached disk is our root disk (flagged by scan_disks):
         if (dob.pool is None and d.root is True):
             # setup our special root disk db entry in Pool
             p = Pool(name=settings.ROOT_POOL, raid='single')
             p.disk_set.add(dob)
             p.save()
             # update disk db object to reflect special root pool status
             dob.pool = p
             dob.save()
             p.size = pool_usage(mount_root(p))[0]
             enable_quota(p)
             p.uuid = btrfs_uuid(dob.name)
             p.save()
         # save our updated db disk object
         dob.save()
     # Update online db entries with S.M.A.R.T availability and status.
     for do in Disk.objects.all():
         # find all the not offline db entries
         if (not do.offline):
             # We have an attached disk db entry
             if re.match('vd', do.name):
                 # Virtio disks (named vd*) have no smart capability;
                 # this avoids cluttering logs with exceptions on these
                 # devices.
                 do.smart_available = do.smart_enabled = False
                 continue
             # try to establish smart availability and status and update db
             try:
                 # for non ata/sata drives
                 do.smart_available, do.smart_enabled = smart.available(
                     do.name)
             except Exception as e:
                 logger.exception(e)
                 do.smart_available = do.smart_enabled = False
         do.save()
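A note on the serial-number scrub above: the first db entry seen per serial wins; later duplicates, and entries whose serial is recognisably fake, are deleted. The core of that pass reduced to plain Python over a hypothetical iterable of disk objects:

def dedupe_by_serial(disk_entries, is_fake):
    # disk_entries: objects with .serial and .delete(); is_fake: a
    # predicate identifying scan_disks' generated fake serials.
    seen = set()
    for do in disk_entries:
        if do.serial in seen or is_fake(do.serial):
            do.delete()  # drop duplicate or fake entry
            continue
        seen.add(do.serial)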
Example #43
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            if (pname == settings.ROOT_POOL):
                e_msg = ('Edit operations are not allowed on this Pool(%s) '
                         'as it contains the operating system.' % pname)
                handle_exception(Exception(e_msg), request)
            try:
                pool = Pool.objects.get(name=pname)
            except:
                e_msg = ('Pool(%s) does not exist.' % pname)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            disks = [self._validate_disk(d, request) for d in
                     request.data.get('disks')]
            num_new_disks = len(disks)
            if (num_new_disks == 0):
                e_msg = ('List of disks in the input cannot be empty.')
                handle_exception(Exception(e_msg), request)
            dnames = [d.name for d in disks]
            mount_disk = Disk.objects.filter(pool=pool)[0].name
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            # Note: multiply before dividing; with Python 2 integer division,
            # (usage[2] / usage[0]) * 100 would truncate to 0.
            free_percent = (usage[2] * 100) / usage[0]
            threshold_percent = self.ADD_THRESHOLD * 100
            if (command == 'add'):
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                                 'because it belongs to another pool(%s)' %
                                 (d.name, pool.name, d.pool.name))
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                                 'past. If you really would like to add it, '
                                 'wipe it from the Storage -> Disks screen '
                                 'of the web-ui' % d.name)
                        handle_exception(Exception(e_msg), request)
                if (new_raid not in self.SUPPORTED_MIGRATIONS[pool.raid]):
                    e_msg = ('Pool migration from %s to %s is not supported.'
                             % (pool.raid, new_raid))
                    handle_exception(Exception(e_msg), request)

                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=r'(started|running)').exists()):
                    e_msg = ('A Balance process is already running for this '
                             'pool(%s). Resize is not supported during a '
                             'balance process.' % pool.name)
                    handle_exception(Exception(e_msg), request)

                if (free_percent < threshold_percent):
                    e_msg = ('Resize is only supported when there is at least '
                             '%d percent free space available. But currently '
                             'only %d percent is free. Remove some data and '
                             'try again.' % (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)

                if (new_raid != pool.raid):
                    if (pool.raid in ('single', 'raid0') and
                            new_raid in ('raid1', 'raid10')):
                        cur_num_disks = num_total_disks - num_new_disks
                        if (num_new_disks < cur_num_disks):
                            e_msg = ('For single/raid0 to raid1/raid10 '
                                     'conversion, at least as many disks as '
                                     'are currently present must be added. '
                                     '%d disks are provided, but at least '
                                     '%d are required.' % (num_new_disks,
                                                           cur_num_disks))
                            handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames)
                balance_pid = balance_start(pool, mount_disk, convert=new_raid)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()

            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk(%s) cannot be removed because it does '
                                 'not belong to this Pool(%s)' %
                                 (d.name, pool.name))
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                if (pool.raid in ('raid0', 'single',)):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid(%s) configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid in ('raid5', 'raid6',)):
                    e_msg = ('Disk removal is not supported for pools with '
                             'raid5/6 configuration')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10'):
                    if (num_new_disks != 2):
                        e_msg = ('Only two disks can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 4):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 4 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)

                elif (pool.raid == 'raid1'):
                    if (num_new_disks != 1):
                        e_msg = ('Only one disk can be removed at once from '
                                 'this pool because of its raid '
                                 'configuration(%s)' % pool.raid)
                        handle_exception(Exception(e_msg), request)
                    elif (remaining_disks < 2):
                        e_msg = ('Disks cannot be removed from this pool '
                                 'because its raid configuration(%s) '
                                 'requires a minimum of 2 disks' % pool.raid)
                        handle_exception(Exception(e_msg), request)

                threshold_percent = 100 - threshold_percent
                if (free_percent < threshold_percent):
                    e_msg = ('Removing disks is only supported when there is '
                             'at least %d percent free space available. But '
                             'currently only %d percent is free. Remove some '
                             'data and try again.' %
                             (threshold_percent, free_percent))
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, mount_disk, dnames, add=False)
                balance_pid = balance_start(pool, mount_disk)
                ps = PoolBalance(pool=pool, pid=balance_pid)
                ps.save()

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = ('command(%s) is not supported.' % command)
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
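A detail worth noting in the example above: free_percent multiplies before dividing. These are Python 2 era handlers, where '/' between integers is floor division, so dividing first would truncate to zero for any non-full pool:

# Hypothetical pool_usage() fields, in KB.
total_kb, free_kb = 1000, 400

wrong = (free_kb / total_kb) * 100  # 0 under Python 2 integer division
right = (free_kb * 100) / total_kb  # 40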
Example #44
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(name=pname)
            except:
                e_msg = ('Pool(%s) does not exist.' % pname)
                handle_exception(Exception(e_msg), request)

            if (pool.role == 'root'):
                e_msg = ('Edit operations are not allowed on this Pool(%s) '
                         'as it contains the operating system.' % pname)
                handle_exception(Exception(e_msg), request)

            if (command == 'remount'):
                return self._remount(request, pool)

            disks = [self._validate_disk(d, request) for d in
                     request.data.get('disks', [])]
            num_new_disks = len(disks)
            dnames = [d.name for d in disks]
            new_raid = request.data.get('raid_level', pool.raid)
            num_total_disks = (Disk.objects.filter(pool=pool).count() +
                               num_new_disks)
            if (command == 'add'):
                for d in disks:
                    if (d.pool is not None):
                        e_msg = ('Disk(%s) cannot be added to this Pool(%s) '
                                 'because it belongs to another pool(%s)' %
                                 (d.name, pool.name, d.pool.name))
                        handle_exception(Exception(e_msg), request)
                    if (d.btrfs_uuid is not None):
                        e_msg = ('Disk(%s) has a BTRFS filesystem from the '
                                 'past. If you really would like to add it, '
                                 'wipe it from the Storage -> Disks screen '
                                 'of the web-ui' % d.name)
                        handle_exception(Exception(e_msg), request)
                if (new_raid == 'single'):
                    e_msg = ('Pool migration from %s to %s is not supported.'
                             % (pool.raid, new_raid))
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid10' and num_total_disks < 4):
                    e_msg = ('A minimum of four drives are required for the '
                             'raid level: raid10')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid6' and num_total_disks < 3):
                    e_msg = ('A minimum of three drives are required for the '
                             'raid level: raid6')
                    handle_exception(Exception(e_msg), request)

                if (new_raid == 'raid5' and num_total_disks < 2):
                    e_msg = ('A minimum of two drives are required for the '
                             'raid level: raid5')
                    handle_exception(Exception(e_msg), request)

                if (PoolBalance.objects.filter(
                        pool=pool,
                        status__regex=r'(started|running)').exists()):
                    e_msg = ('A Balance process is already running for this '
                             'pool(%s). Resize is not supported during a '
                             'balance process.' % pool.name)
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames)
                tid = self._balance_start(pool, convert=new_raid)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                pool.raid = new_raid
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()

            elif (command == 'remove'):
                if (new_raid != pool.raid):
                    e_msg = ('Raid configuration cannot be changed while '
                             'removing disks')
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    if (d.pool is None or d.pool != pool):
                        e_msg = ('Disk(%s) cannot be removed because it does '
                                 'not belong to this Pool(%s)' %
                                 (d.name, pool.name))
                        handle_exception(Exception(e_msg), request)
                remaining_disks = (Disk.objects.filter(pool=pool).count() -
                                   num_new_disks)
                if (pool.raid in ('raid0', 'single',)):
                    e_msg = ('Disks cannot be removed from a pool with this '
                             'raid(%s) configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid1' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration(raid1) '
                             'requires a minimum of 2 disks')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid10' and remaining_disks < 4):
                    e_msg = ('Disks cannot be removed from this pool '
                             'because its raid configuration(raid10) '
                             'requires a minimum of 4 disks')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid5' and remaining_disks < 2):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration(raid5) requires a '
                             'minimum of 2 disks')
                    handle_exception(Exception(e_msg), request)

                if (pool.raid == 'raid6' and remaining_disks < 3):
                    e_msg = ('Disks cannot be removed from this pool because '
                             'its raid configuration(raid6) requires a '
                             'minimum of 3 disks')
                    handle_exception(Exception(e_msg), request)

                usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
                size_cut = 0
                for d in disks:
                    size_cut += d.size
                if (size_cut >= usage[2]):
                    e_msg = ('Removing these(%s) disks may shrink the pool by '
                             '%dKB, which is greater than available free space'
                             ' %dKB. This is not supported.' %
                             (dnames, size_cut, usage[2]))
                    handle_exception(Exception(e_msg), request)

                resize_pool(pool, dnames, add=False)
                tid = self._balance_start(pool)
                ps = PoolBalance(pool=pool, tid=tid)
                ps.save()

                for d in disks:
                    d.pool = None
                    d.save()

            else:
                e_msg = ('command(%s) is not supported.' % command)
                handle_exception(Exception(e_msg), request)
            usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name))
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
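Note the tuple indexing here (usage[0] for pool size, usage[2] for free space) versus the single figure used in Example #39. Inferred from these call sites, not from pool_usage() itself, the older return shape looks roughly like:

# Inferred (unconfirmed) shape of the older pool_usage() return, in KB:
usage = (1000000, 600000, 400000)  # (total, used, free) - hypothetical
pool_size_kb = usage[0]
free_kb = usage[2]  # compared against size_cut in the remove path above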
Example #45
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [self._validate_disk(d, request) for d in
                     request.DATA.get('disks')]
            pname = request.DATA['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with a letter(a-z) and can'
                         ' be followed by any of the following characters: '
                         'letter(a-z), digits(0-9), hyphen(-), underscore'
                         '(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.'
                             % d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. Use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[1] and len(disks) == 1):
                e_msg = ('More than one disk is required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) < 2):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
                elif (len(disks) % 2 != 0):
                    e_msg = ('An even number of drives is required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname, raid=raid_level, compression=compression,
                     mnt_options=mnt_options)
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p, dnames[0]))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.disk_set.add(*disks)
            p.save()
            return Response(PoolInfoSerializer(p).data)
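The index-based checks above only make sense against a specific ordering of RAID_LEVELS. The exact tuple is not shown in these examples, but the minimum-disk comparisons here and in Example #40 imply something like:

# Inferred ordering (an assumption reconstructed from the index checks):
RAID_LEVELS = ('single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6')
# [1] raid0: more than one disk; [2] raid1: at least two;
# [3] raid10: at least four, in an even count; [4] raid5 and [5] raid6:
# parity-raid minimums, which differ between the older and newer handlers.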
Example #46
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Sanitize our db entries in view of what we know we have attached.
        # The device serial number is the only known external unique entry;
        # scan_disks makes this so in the case of empty or repeat entries by
        # providing fake serial numbers, which are in turn flagged via the
        # WebUI as unreliable.
        # 1) scrub all device names with a unique but nonsense uuid4
        # 2) mark all offline disks as such via a db flag
        # 3) mark all offline disks' smart available and enabled flags as False
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (
                    re.match('fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are placeholders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient, but just scanned and so current, sda
            # type name to a more useful by-id type name as found in
            # /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid is
                # False; that way we flag the disk as unusable, as no by-id
                # type name was found. It may already have been set though,
                # as the only by-id failures so far are virtio disks with no
                # serial, so scan_disks will have already given it a fake
                # serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that, prior to using
                # json format for the db role field, we stored one of 2
                # strings. If these 2 strings are found then ignore them, as
                # we then overwrite with our current finding in the new json
                # format. I.e. a non-None value could also be a legacy entry,
                # so follow the overwrite path when a legacy entry is found
                # by treating it as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now) so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry, so we need not
                            # bother with the dict edit and json conversion
                            # only to end up with an empty json {}; revert
                            # to the default of None.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via its label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # This is for backwards compatibility. Root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset
                # migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label, and
            # the attached disk is our root disk (flagged by scan_disks):
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
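The mdraid role bookkeeping above reduces to a small pure function: roles live as a JSON dict in a single text field, legacy plain-string values are treated as empty, and only the 'mdraid' key is added or removed while other roles are preserved. A condensed sketch (the helper name is ours, not the source's):

import json

LEGACY = ('isw_raid_member', 'linux_raid_member')

def updated_role(role_field, fstype):
    # Decode existing roles; legacy plain strings count as no roles.
    if role_field is None or role_field in LEGACY:
        roles = {}
    else:
        roles = json.loads(role_field)
    if fstype in LEGACY:
        roles['mdraid'] = fstype  # add or refresh mdraid membership
    else:
        roles.pop('mdraid', None)  # drop a stale mdraid role, keep others
    return json.dumps(roles) if roles else None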
Example #47
    def put(self, request, pname, command):
        """
        resize a pool.
        @pname: pool's name
        @command: 'add' - add a list of disks and hence expand the pool
                  'remove' - remove a list of disks and hence shrink the pool
        """
        with self._handle_exception(request):
            try:
                pool = Pool.objects.get(name=pname)
            except:
                e_msg = ('pool: %s does not exist' % pname)
                handle_exception(Exception(e_msg), request)

            disks = [self._validate_disk(d, request) for d in
                     request.DATA.get('disks')]
            if (len(disks) == 0):
                msg = ('list of disks in the input is empty')
                raise Exception(msg)
            dnames = [d.name for d in disks]

            for d in disks:
                if (d.pool is not None and d.pool != pool):
                    e_msg = ('Disk(%s) belongs to another pool(%s)' %
                             (d.name, d.pool.name))
                    handle_exception(Exception(e_msg), request)

            mount_disk = Disk.objects.filter(pool=pool)[0].name
            if (command == 'add'):
                for d_o in disks:
                    d_o.pool = pool
                    d_o.save()
                resize_pool(pool.name, mount_disk, dnames)
            elif (command == 'remove'):
                remaining_disks = Disk.objects.filter(pool=pool).count() - len(disks)
                logger.debug('remaining disks = %d' % remaining_disks)
                if (pool.raid == 'raid0' or pool.raid == 'raid1' or
                    pool.raid == 'raid10' or pool.raid == 'single'):
                    e_msg = ('Removing drives from this(%s) raid '
                             'configuration is not supported' % pool.raid)
                    handle_exception(Exception(e_msg), request)
                if (pool.raid == 'raid5' and remaining_disks < 3):
                    e_msg = ('Resize not possible because a minimum of 3 '
                             'drives is required for this(%s) '
                             'raid configuration.' % pool.raid)
                    handle_exception(Exception(e_msg), request)
                if (pool.raid == 'raid6' and remaining_disks < 4):
                    e_msg = ('Resize not possible because a minimum of 4 '
                             'drives is required for this(%s) raid '
                             'configuration' % pool.raid)
                    handle_exception(Exception(e_msg), request)
                for d in disks:
                    d.pool = None
                    d.save()
                mount_disk = Disk.objects.filter(pool=pool)[0].name
                resize_pool(pool.name, mount_disk, dnames, add=False)
            else:
                msg = ('unknown command: %s' % command)
                raise Exception(msg)
            usage = pool_usage(mount_disk)
            pool.size = usage[0]
            pool.save()
            return Response(PoolInfoSerializer(pool).data)
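For context, these put() handlers are Django REST Framework views, so a resize arrives as a PUT against the pool's resource with a 'disks' payload. A hypothetical client call (the endpoint path and credentials are assumptions; only the payload keys come from the handlers above):

import requests

# Hypothetical invocation; URL shape and auth are assumed, not confirmed.
resp = requests.put(
    'https://rockstor.example/api/pools/mypool/add',
    json={'disks': ['sdb', 'sdc'], 'raid_level': 'raid1'},
    auth=('admin', 'password'),
    verify=False,
)
print(resp.status_code, resp.json())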