Beispiel #1
0
    def post(self, request):
        """
        Create a new btrfs pool.

        input is a list of disks, raid_level and name of the pool.
        Validates the pool name, the disks' existence and the raid level /
        disk count combination, then creates the pool on disk and in the
        db. Returns serialized info of the new pool. All validation
        failures are routed through handle_exception (which is expected to
        raise, given the RockStorAPIException re-raise below).
        """
        try:
            disks = request.DATA['disks'].split(',')
            pname = request.DATA['pname']
            # Anchor POOL_REGEX at the end of the string; re.match already
            # anchors at the start.
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with a letter(a-z) and can'
                         ' be followed by any of the following characters: '
                         'letter(a-z), digits(0-9), hyphen(-), underscore'
                         '(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            # every requested disk must already be known to the db
            for d in disks:
                if (not Disk.objects.filter(name=d).exists()):
                    e_msg = ('Unknown disk: %s' % d)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            # Minimum disk count rules per raid level. NOTE(review): the
            # RAID_LEVELS index meanings below (0:2 multi-disk, 2 needing
            # >= 4, 4 needing >= 2, 5 needing >= 3) are assumed from these
            # checks -- confirm against the class's RAID_LEVELS definition.
            if (raid_level in self.RAID_LEVELS[0:2] and len(disks) == 1):
                e_msg = ('More than one disk is required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            p = Pool(name=pname, raid=raid_level)
            # raid_level is passed twice -- presumably data and metadata
            # profiles; TODO confirm against add_pool's signature.
            add_pool(pname, raid_level, raid_level, disks)
            usage = pool_usage(disks[0])
            p.size = usage[0]
            p.save()
            # associate all member disks with the new pool db entry
            p.disk_set.add(*[Disk.objects.get(name=d) for d in disks])
            return Response(PoolInfoSerializer(p).data)
        except RockStorAPIException:
            raise
        except Exception, e:
            handle_exception(e, request)
Beispiel #2
0
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.parted = False
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = po.usage_bound()
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Beispiel #3
0
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         #get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.save()
             mount_root(po, d)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Beispiel #4
0
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        try:
            pname = request.DATA['pname']
            disks = request.DATA['disks'].split(',')

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (not Disk.objects.filter(name=d).exists()):
                    e_msg = ('Unknown disk: %s' % d)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            if (raid_level in self.RAID_LEVELS[0:2] and len(disks) == 1):
                e_msg = ('More than one disk is required for the chosen raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the chose raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            p = Pool(name=pname, raid=raid_level)
            add_pool(pname, raid_level, raid_level, disks)
            usage = pool_usage2(pname, disks[0]).split()
            p.size = int(usage[2]) + int(usage[3])
            p.save()
            p.disk_set.add(*[Disk.objects.get(name=d) for d in disks])
            return Response(PoolInfoSerializer(p).data)
        except RockStorAPIException:
            raise
        except Exception, e:
            handle_exception(e, request)
Beispiel #5
0
 def _create_root_pool(self, d):
     """
     Build and persist the special root Pool around disk d.

     Attaches d to a new 'single' raid pool named settings.ROOT_POOL,
     mounts it to record its usage, stamps the btrfs uuid taken from d
     and returns the saved Pool object.
     """
     root_pool = Pool(name=settings.ROOT_POOL, raid='single')
     root_pool.disk_set.add(d)
     root_pool.save()
     # link the disk back to its newly created pool
     d.pool = root_pool
     d.save()
     # mount first, then read element 0 (total size) of the usage info
     root_pool.size = pool_usage(mount_root(root_pool))[0]
     enable_quota(root_pool)
     root_pool.uuid = btrfs_uuid(d.name)
     root_pool.save()
     return root_pool
Beispiel #6
0
 def _update_disk_state():
     """
     Refresh the Disk db table against the currently attached drives.

     Scans for attached disks (via scan_disks), matching db entries first
     by device name and then by serial number, creating new entries for
     unmatched drives. Each matched/new entry is updated with the scanned
     attributes and linked to its Pool by btrfs label; a root drive with
     no pool triggers creation of the special root pool. Finally, db
     entries for devices no longer seen are marked offline and SMART
     availability is probed for the attached drives.
     """
     # collection of currently attached drives above the minimum size
     disks = scan_disks(settings.MIN_DISK_SIZE)
     for d in disks:
         dob = None
         # Prefer a match on device name; fall back to serial number.
         if (Disk.objects.filter(name=d.name).exists()):
             dob = Disk.objects.get(name=d.name)
             dob.serial = d.serial
         elif (Disk.objects.filter(serial=d.serial).exists()):
             # device name changed since the last scan; re-key by serial
             dob = Disk.objects.get(serial=d.serial)
             dob.name = d.name
         else:
             # previously unknown drive: create a fresh db entry
             dob = Disk(name=d.name, size=d.size, parted=d.parted,
                        btrfs_uuid=d.btrfs_uuid, model=d.model,
                        serial=d.serial, transport=d.transport,
                        vendor=d.vendor)
         # refresh the scanned attributes on the (possibly new) db object
         dob.size = d.size
         dob.parted = d.parted
         dob.offline = False  # we are iterating over attached devices
         dob.model = d.model
         dob.transport = d.transport
         dob.vendor = d.vendor
         dob.btrfs_uuid = d.btrfs_uuid
         # a non-btrfs filesystem on the drive invalidates the btrfs uuid
         if (d.fstype is not None and d.fstype != 'btrfs'):
             dob.btrfs_uuid = None
             dob.parted = True
         # link to an existing pool via the btrfs label, if known
         if (Pool.objects.filter(name=d.label).exists()):
             dob.pool = Pool.objects.get(name=d.label)
         else:
             dob.pool = None
         # a root drive with no pool gets the special root pool
         if (dob.pool is None and d.root is True):
             p = Pool(name=settings.ROOT_POOL, raid='single')
             p.disk_set.add(dob)
             p.save()
             dob.pool = p
             dob.save()
             p.size = pool_usage(mount_root(p))[0]
             enable_quota(p)
             p.uuid = btrfs_uuid(dob.name)
             p.save()
         dob.save()
     # Mark db entries for unseen devices offline; probe SMART otherwise.
     for do in Disk.objects.all():
         if (do.name not in [d.name for d in disks]):
             do.offline = True
         else:
             try:
                 # for non ata/sata drives
                 do.smart_available, do.smart_enabled = smart.available(do.name)
             except Exception, e:
                 logger.exception(e)
                 do.smart_available = do.smart_enabled = False
         do.save()
Beispiel #7
0
    def test_name_regex(self, mock_pool):
        """
        Share name must start with an alphanumeric (a-z0-9) character and
        can be followed by any of the following characters: letter(a-z),
        digits(0-9), hyphen(-), underscore(_) or a period(.).

        1. Test a few valid names (eg: share1, Myshare, 123, etc..)
        2. Test a few invalid names (eg: -share1, .share etc..)
        3. Empty string for share name
        4. max length(254 characters) for share name
        5. max length + 1 for share name
        """

        mock_pool.objects.get.side_effect = None
        temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
        mock_pool.objects.get.return_value = temp_pool

        # Mock exists to return false. Avoids all possible share names taken:
        # i.e. 'A pool with this name (rootshare) exists. Share and pool
        # names must be distinct. ...'
        # from: "Pool.objects.filter(name=sname).exists()"
        mock_pool.objects.filter.return_value.exists.return_value = False

        # valid share names
        data = {'pool': 'rockstor_rockstor', 'size': 1000}
        valid_names = (
            '123share',
            'SHARE_TEST',
            'Zzzz...',
            '1234',
            'myshare',
            'Sha' + 'r' * 250 + 'e',
        )

        for sname in valid_names:
            data['sname'] = sname
            response = self.client.post(self.BASE_URL, data=data)
            self.assertEqual(response.status_code,
                             status.HTTP_200_OK,
                             msg=response.data)
            self.assertEqual(response.data['name'], sname)

        # invalid share names
        e_msg = ('Invalid characters in share name. Following are '
                 'allowed: letter(a-z or A-Z), digit(0-9), '
                 'hyphen(-), underscore(_) or a period(.).')

        # The invalid_names list is based on above description, some are
        # POSIX valid but ruled out as less portable.
        invalid_names = (
            'Share 1',
            # A missing trailing comma previously concatenated 'a$sign' and
            # '/share' into a single name; keep them as two separate cases.
            'a$sign',
            '/share',
            ':share',
            '\share',
            'question?mark',
            'asterix*',
            '',
            ' ',
        )
        for sname in invalid_names:
            data['sname'] = sname
            response = self.client.post(self.BASE_URL, data=data)
            self.assertEqual(response.status_code,
                             status.HTTP_500_INTERNAL_SERVER_ERROR,
                             msg=response.data)
            self.assertEqual(response.data[0], e_msg)

        # Share name exceeding the 254 character limit (255 chars)
        e_msg = 'Share name length cannot exceed 254 characters.'

        data['sname'] = 'Sh' + 'a' * 251 + 're'
        response = self.client.post(self.BASE_URL, data=data)
        self.assertEqual(response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response.data)
        self.assertEqual(response.data[0], e_msg)
    def test_delete_requests(self, mock_pool, mock_share, mock_snapshot):
        """
        1. Delete snapshot that does not exist
        2. Delete snapshot with no name specified
        3. Happy path: create then delete a snapshot
        """

        temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
        mock_pool.objects.get.return_value = temp_pool

        temp_share = Share(id=3, name='share1', pool=temp_pool, size=8025459)
        mock_share.objects.get.return_value = temp_share

        mock_snapshot.objects.get.side_effect = Snapshot.DoesNotExist

        # Delete snapshot that does not exist
        snap_name = 'snap3'
        share_id = 3  # from fix5.json
        response = self.client.delete('{}/{}/snapshots/{}'.format(
            self.BASE_URL, share_id, snap_name))
        self.assertEqual(response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response.data)
        e_msg = 'Snapshot name (snap3) does not exist.'
        self.assertEqual(response.data[0], e_msg)

        temp_share2 = Share(id=4, name='share2', pool=temp_pool, size=8025459)
        mock_share.objects.get.return_value = temp_share2

        # Delete without snapshot name
        share_id = 3  # from fix5.json
        response = self.client.delete('{}/{}/snapshots'.format(
            self.BASE_URL, share_id))
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK,
                         msg=response.data)

        # Delete snapshot happy path
        # creating a snapshot just for the next test.
        # TODO: replace this repeat post test with a proper mock of a snapshot
        # ie attempted to use:

        # temp_snap = Snapshot(id=2, name='snap2', share=temp_share2,
        #                      snap_type='admin')
        # mock_snapshot.objects.get.return_value = temp_snap
        # mock_snapshot.objects.filter(share='share2', name='snap2'
        #                              ).exists.return_value = True
        # but received:
        # 'Snapshot name (snap2) does not exist.'

        data = {
            'snapshot-name': 'snap2',
            'shares': 'share2',
            'writable': False,
            'uvisible': False
        }
        snap_name = 'snap2'
        share = 'share2'
        share_id = 4
        response = self.client.post('{}/{}/snapshots/{}'.format(
            self.BASE_URL, share_id, snap_name),
                                    data=data,
                                    sname=share,
                                    snap_name=snap_name)
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK,
                         msg=response.data)
        # now move to our happy path delete test of just created 'snap2'
        response = self.client.delete('{}/{}/snapshots/{}'.format(
            self.BASE_URL, share_id, snap_name))
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK,
                         msg=response.data)
    def test_post_requests_2(self, mock_nfs, mock_pool, mock_share):
        """
        1. Create snapshot providing invalid uvisible bool type
        2. Create snapshot providing invalid writable bool type
        3. Happy path to create snapshot
        4. Create a snapshot with a duplicate name
        """

        temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
        mock_pool.objects.get.return_value = temp_pool

        temp_share = Share(id=3, name='share1', pool=temp_pool, size=8025459)
        mock_share.objects.get.return_value = temp_share
        # mock_snapshot.objects.get.side_effect = Snapshot.DoesNotExist

        # Invalid uvisible bool type
        data = {
            'snapshot-name': 'snap3',
            'shares': 'share1',
            'writable': False,
            'uvisible': 'invalid'
        }
        snap_name = 'snap3'
        share_name = 'share1'
        share_id = 3  # from fix5.json
        response = self.client.post('%s/%s/snapshots/%s' %
                                    (self.BASE_URL, share_id, snap_name),
                                    data=data,
                                    sname=share_name,
                                    snap_name=snap_name)
        self.assertEqual(response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response.data)
        e_msg = "Element 'uvisible' must be a boolean, not (<type 'unicode'>)."
        self.assertEqual(response.data[0], e_msg)

        # Invalid writable bool type
        data = {
            'snapshot-name': 'snap3',
            'shares': 'share1',
            'writable': 'invalid',
            'uvisible': True
        }
        snap_name = 'snap3'
        share = 'share1'
        share_id = 3  # from fix5.json and above mocking object
        mock_nfs.objects.filter(share=share).exists.return_value = True
        response = self.client.post('{}/{}/snapshots/{}'.format(
            self.BASE_URL, share_id, snap_name),
                                    data=data,
                                    sname=share,
                                    snap_name=snap_name)
        self.assertEqual(response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response.data)
        # TODO consider changing tested code to unify quota types to single
        # as per "Invalid uvisible bool type" to remove need for escaping here.
        e_msg = ('Element "writable" must be a boolean, not '
                 '(<type \'unicode\'>).')
        self.assertEqual(response.data[0], e_msg)

        # Happy Path creating a snapshot by name snap3
        data = {
            'snapshot-name': 'snap3',
            'shares': 'share1',
            'writable': False,
            'uvisible': False
        }
        snap_name = 'snap3'
        share = 'share1'
        share_id = 3  # from fix5.json and above mocking object
        mock_nfs.objects.filter(share=share).exists.return_value = True
        response = self.client.post('{}/{}/snapshots/{}'.format(
            self.BASE_URL, share_id, snap_name),
                                    data=data,
                                    sname=share,
                                    snap_name=snap_name)
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK,
                         msg=response.data)

        # Create duplicate snapshot by name snap3 (the URL snap_name wins
        # over the 'snap2' in the POST body, hence the duplicate).
        data = {
            'snapshot-name': 'snap2',
            'shares': 'share2',
            'writable': True,
            'uvisible': True
        }
        snap_name = 'snap3'
        share_name = 'share1'
        share_id = 3  # from fix5.json
        response = self.client.post('{}/{}/snapshots/{}'.format(
            self.BASE_URL, share_id, snap_name),
                                    data=data,
                                    sname=share_name,
                                    snap_name=snap_name)
        self.assertEqual(response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response.data)
        e_msg = ('Snapshot ({}) already exists for the '
                 'share ({}).').format(snap_name, share_name)
        self.assertEqual(response.data[0], e_msg)
Beispiel #10
0
    def post(self, request):
        """
        Create a new btrfs pool.

        input is a list of disks, raid_level and name of the pool.
        Validates the pool name, rejects disks already carrying a btrfs
        filesystem, and enforces the raid level / disk count combination
        before creating the pool on disk and in the db.
        Returns serialized info of the new pool.
        """
        with self._handle_exception(request):
            disks = [self._validate_disk(d, request) for d in
                     request.DATA.get('disks')]
            pname = request.DATA['pname']
            # Anchor POOL_REGEX at the end; re.match anchors the start.
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with a letter(a-z) and can'
                         ' be followed by any of the following characters: '
                         'letter(a-z), digits(0-9), hyphen(-), underscore'
                         '(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            # refuse disks that already carry a btrfs filesystem
            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.'
                             % d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            # Disk count constraints per raid level. NOTE(review): the
            # RAID_LEVELS index meanings below (0 exactly one, 1 more than
            # one, 2 exactly two, 3 even count >= 4, 4 at least three,
            # 5 at least four) are assumed from these checks -- confirm
            # against the class's RAID_LEVELS definition.
            if (raid_level == self.RAID_LEVELS[0] and len(disks) != 1):
                e_msg = ('Exactly one disk is required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[1] and len(disks) == 1):
                e_msg = ('More than one disk is required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) != 2):
                e_msg = ('Exactly two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
                elif (len(disks) % 2 != 0):
                    e_msg = ('Even number of drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            dnames = [d.name for d in disks]
            pool_size = self._pool_size(dnames, raid_level)
            # raid_level is passed twice -- presumably data and metadata
            # profiles; TODO confirm against add_pool's signature.
            add_pool(pname, raid_level, raid_level, dnames)
            pool_uuid = btrfs_uuid(dnames[0])
            p = Pool(name=pname, raid=raid_level, size=pool_size,
                     uuid=pool_uuid)
            p.save()
            p.disk_set.add(*disks)
            return Response(PoolInfoSerializer(p).data)
Beispiel #11
0
 def _update_disk_state():
     """
     A db atomic method to update the database of attached disks / drives.
     Works only on device serial numbers for drive identification.
     Calls scan_disks to establish the current connected drives info.
     Initially removes duplicate by serial number db entries to deal
     with legacy db states and obfuscates all previous device names as they
     are transient. The drive database is then updated with the attached
     disks info and previously known drives no longer found attached are
     marked as offline. All offline drives have their SMART availability and
     activation status removed and all attached drives have their SMART
     availability assessed and activated if available.
     :return: serialized models of attached and missing disks via serial num
     """
     # Acquire a list (namedtupil collection) of attached drives > min size
     disks = scan_disks(settings.MIN_DISK_SIZE)
     serial_numbers_seen = []
     # Make sane our db entries in view of what we know we have attached.
     # Device serial number is only known external unique entry, scan_disks
     # make this so in the case of empty or repeat entries by providing
     # fake serial numbers which are in turn flagged via WebUI as unreliable.
     # 1) scrub all device names with unique but nonsense uuid4
     # 1) mark all offline disks as such via db flag
     # 2) mark all offline disks smart available and enabled flags as False
     logger.info('update_disk_state() Called')
     for do in Disk.objects.all():
         # Replace all device names with a unique placeholder on each scan
         # N.B. do not optimize by re-using uuid index as this could lead
         # to a non refreshed webui acting upon an entry that is different
         # from that shown to the user.
         do.name = str(uuid.uuid4()).replace('-', '')  # 32 chars long
         # Delete duplicate or fake by serial number db disk entries.
         # It makes no sense to save fake serial number drives between scans
         # as on each scan the serial number is re-generated anyway.
         if (do.serial in serial_numbers_seen) or (len(do.serial) == 48):
             logger.info('Deleting duplicate or fake (by serial) Disk db '
                         'entry. Serial = %s' % do.serial)
             do.delete()  # django >=1.9 returns a dict of deleted items.
             # Continue onto next db disk object as nothing more to process.
             continue
         # first encounter of this serial in the db so stash it for reference
         serial_numbers_seen.append(deepcopy(do.serial))
         # Look for devices (by serial number) that are in the db but not in
         # our disk scan, ie offline / missing.
         if (do.serial not in [d.serial for d in disks]):
             # update the db entry as offline
             do.offline = True
             # disable S.M.A.R.T available and enabled flags.
             do.smart_available = do.smart_enabled = False
         do.save()  # make sure all updates are flushed to db
     # Our db now has no device name info as all dev names are place holders.
     # Iterate over attached drives to update the db's knowledge of them.
     # Kernel dev names are unique so safe to overwrite our db unique name.
     for d in disks:
         # start with an empty disk object
         dob = None
         # If the db has an entry with this disk's serial number then
         # use this db entry and update the device name from our recent scan.
         if (Disk.objects.filter(serial=d.serial).exists()):
             dob = Disk.objects.get(serial=d.serial)
             dob.name = d.name
         else:
             # We have an assumed new disk entry as no serial match in db.
             # Build a new entry for this disk.
             dob = Disk(name=d.name, serial=d.serial)
         # Update the db disk object (existing or new) with our scanned info
         dob.size = d.size
         dob.parted = d.parted
         dob.offline = False  # as we are iterating over attached devices
         dob.model = d.model
         dob.transport = d.transport
         dob.vendor = d.vendor
         dob.btrfs_uuid = d.btrfs_uuid
         # If attached disk has an fs and it isn't btrfs
         if (d.fstype is not None and d.fstype != 'btrfs'):
             dob.btrfs_uuid = None
             dob.parted = True
         # If our existing Pool db knows of this disk's pool via it's label:
         if (Pool.objects.filter(name=d.label).exists()):
             # update the disk db object's pool field accordingly.
             dob.pool = Pool.objects.get(name=d.label)
         else:  # this disk is not known to exist in any pool via it's label
             dob.pool = None
         # If no pool has yet been found with this disk's label in and
         # the attached disk is our root disk (flagged by scan_disks)
         if (dob.pool is None and d.root is True):
             # setup our special root disk db entry in Pool
             p = Pool(name=settings.ROOT_POOL, raid='single')
             p.disk_set.add(dob)
             p.save()
             # update disk db object to reflect special root pool status
             dob.pool = p
             dob.save()
             p.size = pool_usage(mount_root(p))[0]
             enable_quota(p)
             p.uuid = btrfs_uuid(dob.name)
             p.save()
         # save our updated db disk object
         dob.save()
     # Update online db entries with S.M.A.R.T availability and status.
     for do in Disk.objects.all():
         # find all the not offline db entries
         if (not do.offline):
             # We have an attached disk db entry
             if re.match('vd', do.name):
                 # Virtio disks (named vd*) have no smart capability.
                 # avoids cluttering logs with exceptions on these devices.
                 do.smart_available = do.smart_enabled = False
                 continue
             # try to establish smart availability and status and update db
             try:
                 # for non ata/sata drives
                 do.smart_available, do.smart_enabled = smart.available(do.name)
             except Exception, e:
                 logger.exception(e)
                 do.smart_available = do.smart_enabled = False
         do.save()
Beispiel #12
0
 def _create_root_pool(self, d):
     p = Pool(name=settings.ROOT_POOL, raid='single')
     p.size = pool_usage(mount_root(p, d.name))[0]
     enable_quota(p, '/dev/%s' % d.name)
     p.uuid = btrfs_uuid(d.name)
     return p
Beispiel #13
0
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtupil collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is only known external unique entry, scan_disks
        # make this so in the case of empty or repeat entries by providing
        # fake serial numbers which are flagged via WebUI as unreliable.
        # 1) Scrub all device names with unique but nonsense uuid4.
        # 2) Mark all offline disks as such via db flag.
        # 3) Mark all offline disks smart available and enabled flags as False.
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (re.match(
                    'fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for
            # reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info: all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # an empty dictionary of non scan_disk() roles
            non_scan_disks_roles = {}
            # and an empty dictionary of discovered roles
            disk_roles_identified = {}
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our new scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.  N.B. we may want to force a
                # fake-serial here if is_byid False, that way we flag as
                # unusable disk as no by-id type name found.  It may already
                # have been set though as the only by-id failures so far are
                # virtio disks with no serial so scan_disks will have already
                # given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial, role=None)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            # N.B. The Disk.btrfs_uuid is in some senses becoming misleading
            # as we begin to deal with Disk.role managed drives such as mdraid
            # members and full disk LUKS drives where we can make use of the
            # non btrfs uuids to track filesystems or LUKS containers.
            # Leaving as is for now to avoid db changes.
            dob.btrfs_uuid = d.uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                # blank any btrfs_uuid it may have had previously.
                dob.btrfs_uuid = None
            # ### BEGINNING OF ROLE FIELD UPDATE ###
            # Update the role field with scan_disks findings.
            # SCAN_DISKS_KNOWN_ROLES a list of scan_disks identifiable roles.
            # Deal with legacy non json role field contents by erasure.
            # N.B. We have a minor legacy issue in that prior to using json
            # format for the db role field we stored one of 2 strings.
            # If either of these 2 strings are found reset to db default of
            # None
            if dob.role == 'isw_raid_member'\
                    or dob.role == 'linux_raid_member':
                # These are the only legacy non json formatted roles used.
                # Erase legacy role entries as we are about to update the role
                # anyway and new entries will then be in the new json format.
                # This helps to keeps the following role logic cleaner and
                # existing mdraid members will be re-assigned if appropriate
                # using the new json format.
                dob.role = None
            # First extract all non scan_disks assigned roles so we can add
            # them back later; all scan_disks assigned roles will be identified
            # from our recent scan_disks data so we assert the new truth.
            if dob.role is not None:  # db default null=True so None here.
                # Get our previous roles into a dictionary
                previous_roles = json.loads(dob.role)
                # Preserve non scan_disks identified roles for this db entry
                non_scan_disks_roles = {
                    role: v
                    for role, v in previous_roles.items()
                    if role not in SCAN_DISKS_KNOWN_ROLES
                }
            if d.fstype == 'isw_raid_member' \
                    or d.fstype == 'linux_raid_member':
                # MDRAID MEMBER: scan_disks() can informs us of the truth
                # regarding mdraid membership via d.fstype indicators.
                # create or update an mdraid dictionary entry
                disk_roles_identified['mdraid'] = str(d.fstype)
            if d.fstype == 'crypto_LUKS':
                # LUKS FULL DISK: scan_disks() can inform us of the truth
                # regarding full disk LUKS containers which on creation have a
                # unique uuid. Stash this uuid so we might later work out our
                # container mapping.
                disk_roles_identified['LUKS'] = str(d.uuid)
            if d.type == 'crypt':
                # OPEN LUKS DISK: scan_disks() can inform us of the truth
                # regarding an opened LUKS container which appears as a mapped
                # device. Assign the /dev/disk/by-id name as a value.
                disk_roles_identified['openLUKS'] = 'dm-name-%s' % d.name
            if d.fstype == 'bcache':
                # BCACHE: scan_disks() can inform us of the truth regarding
                # bcache "backing devices" so we assign a role to avoid these
                # devices being seen as unused and accidentally deleted. Once
                # formatted with make-bcache -B they are accessed via a virtual
                # device which should end up with a serial of bcache-(d.uuid)
                # here we tag our backing device with it's virtual counterparts
                # serial number.
                disk_roles_identified['bcache'] = 'bcache-%s' % d.uuid
            if d.fstype == 'bcache-cdev':
                # BCACHE: continued; here we use the scan_disks() added info
                # of this bcache device being a cache device not a backing
                # device, so it will have no virtual block device counterpart
                # but likewise must be specifically attributed (ie to fast
                # ssd type drives) so we flag in the role system differently.
                disk_roles_identified['bcachecdev'] = 'bcache-%s' % d.uuid
            if d.root is True:
                # ROOT DISK: scan_disks() has already identified the current
                # truth regarding the device hosting our root '/' fs so update
                # our role accordingly.
                # N.B. value of d.fstype here is essentially a place holder as
                # the presence or otherwise of the 'root' key is all we need.
                disk_roles_identified['root'] = str(d.fstype)
            if d.partitions != {}:
                # PARTITIONS: scan_disks() has built an updated partitions dict
                # so create a partitions role containing this dictionary.
                # Convert scan_disks() transient (but just scanned so current)
                # sda type names to a more useful by-id type name as found
                # in /dev/disk/by-id for each partition name.
                byid_partitions = {
                    get_dev_byid_name(part, True)[0]:
                    d.partitions.get(part, "")
                    for part in d.partitions
                }
                # In the above we fail over to "" on failed index for now.
                disk_roles_identified['partitions'] = byid_partitions
            # Now we join the previous non scan_disks identified roles dict
            # with those we have identified from our fresh scan_disks() data
            # and return the result to our db entry in json format.
            # Note that dict of {} isn't None
            if (non_scan_disks_roles != {}) or (disk_roles_identified != {}):
                combined_roles = dict(non_scan_disks_roles,
                                      **disk_roles_identified)
                dob.role = json.dumps(combined_roles)
            else:
                dob.role = None
            # END OF ROLE FIELD UPDATE
            # If our existing Pool db knows of this disk's pool via it's label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # this is for backwards compatibility. root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset
                # migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via it's label
                dob.pool = None
            # If no pool has yet been found with this disk's label in and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.save()
                p.disk_set.add(dob)
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = p.usage_bound()
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial)
                        is not None) or (re.match(
                            'virtio-|md-|mmc-|nvme-|dm-name-luks-|bcache',
                            do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
        ds = DiskInfoSerializer(Disk.objects.all().order_by('name'), many=True)
        return Response(ds.data)
Beispiel #14
0
 def _create_root_pool(self, d):
     """
     Build (but do not save) the special Pool db object for the root drive.

     :param d: scanned disk namedtuple whose .name hosts the root fs.
     :return: an unsaved Pool model instance for the root pool.
     """
     root_pool = Pool(name=settings.ROOT_POOL, raid='single')
     # Mount first; the first element of pool_usage() is taken as the size.
     mnt_pt = mount_root(root_pool, d.name)
     root_pool.size = pool_usage(mnt_pt)[0]
     enable_quota(root_pool, '/dev/%s' % d.name)
     root_pool.uuid = btrfs_uuid(d.name)
     return root_pool
Beispiel #15
0
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: None; callers serialize the updated Disk db entries directly.
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is only known external unique entry, scan_disks
        # make this so in the case of empty or repeat entries by providing
        # fake serial numbers which are in turn flagged via WebUI as unreliable.
        # 1) scrub all device names with unique but nonsense uuid4
        # 2) mark all offline disks as such via db flag
        # 3) mark all offline disks smart available and enabled flags as False
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = str(uuid.uuid4()).replace('-', '')  # 32 chars long
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated anyway.
            # NOTE(review): a 48 char serial is assumed here to be a
            # scan_disks generated fake serial - confirm against scan_disks().
            if (do.serial in serial_numbers_seen) or (len(do.serial) == 48):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                dob.name = d.name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                dob = Disk(name=d.name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
                if d.fstype == 'isw_raid_member' \
                        or d.fstype == 'linux_raid_member':
                    # transfer fstype raid member indicator to role field
                    dob.role = d.fstype
                else:
                    # No identified role from scan_disks() fstype indicator so
                    # set as None to update db of new drive role. If we don't
                    # do this then the same drive when re-deployed will inherit
                    # it's previous role in the db which may be desired but in
                    # the case of these raid member indicators from scan_disks()
                    # we have the current truth provided.
                    # N.B. this if else could be expanded to accommodate other
                    # roles based on the fs found and also take heed of an
                    # existing devices db role entry prior to overwriting.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via it's label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # This is for backwards compatibility. Root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset
                # migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via it's label
                dob.pool = None
            # If no pool has yet been found with this disk's label in and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry
                if (re.match('vd|md|mmcblk', do.name) is not None):
                    # Virtio disks (named vd*), md devices (named md*), and
                    # an sdcard reader that provides devs named mmcblk* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on these types of devices.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                # Python 3 compatible 'as' form; consistent with the other
                # examples in this file (was the legacy 'except Exception, e').
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
Beispiel #16
0
    def post(self, request):
        """
        Create a new pool.

        Input is a list of disks, a raid_level and the name (pname) of the
        pool. Validates name, uniqueness, member disks and raid level /
        disk-count constraints, then creates the Pool db entry, associates
        the member disks and creates the btrfs filesystem via add_pool().

        :param request: rest_framework request with 'disks', 'pname' and
            'raid_level' in request.data.
        :return: Response containing the serialized new Pool.
        """
        with self._handle_exception(request):
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            pname = request.data['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = (
                    'Invalid characters in Pool name. Following '
                    'characters are allowed: letter(a-z or A-Z), digit(0-9), '
                    'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            # N.B. a name of exactly 255 characters is accepted; only longer
            # names are rejected (message previously claimed 'less than 255').
            if (len(pname) > 255):
                e_msg = ('Pool name must be 255 characters or less')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool(%s) already exists. Choose a different name' %
                         pname)
                handle_exception(Exception(e_msg), request)

            if (Share.objects.filter(name=pname).exists()):
                e_msg = (
                    'A Share with this name(%s) exists. Pool and Share names '
                    'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            # Reject disks that already carry a (foreign) btrfs filesystem.
            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.' %
                             d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: {}'.format(
                    self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid1 disk count check
            if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname,
                     raid=raid_level,
                     compression=compression,
                     mnt_options=mnt_options)
            p.disk_set.add(*disks)
            p.save()
            # Explicitly save each member disk: p.disk_set.add(*disks) alone
            # was observed not to persist the disks in a test environment.
            for d in disks:
                d.pool = p
                d.save()
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            return Response(PoolInfoSerializer(p).data)
    def post(self, request, uuid):
        """
        Import an existing btrfs pool, identified by the given uuid, into the
        db along with its shares and (matching) snapshots.

        NOTE(review): qgroup_size and qgroup_id, used in the Share / Snapshot
        construction below, are not defined within this method - confirm they
        resolve in the enclosing scope or this will raise NameError.

        :param request: rest_framework request used for error reporting.
        :param uuid: btrfs filesystem uuid of the pool to import.
        """
        disks = Disk.objects.filter(btrfs_uuid=uuid)

        # Sanity check the first member device before attempting the import.
        if (not btrfs_importable(disks[0].name)):
            e_msg = ('btrfs check failed on device: %s Cannot automatically '
                     'import the pool with uuid: %s' % (disks[0].name, uuid))
            handle_exception(Exception(e_msg), request)


        # Get the name (btrfs label) of the pool.
        pname = btrfs_label(uuid)

        # Mount the pool via its first member device.
        mount_root(pname, '/dev/%s' % disks[0].name)
        pool_mnt_pt = '%s/%s' % (settings.MNT_PT, pname)

        # Probe the raid level; abort (and unmount) if it cannot be found.
        raid_level = btrfs_raid_level(pname)
        if (raid_level is None):
            umount_root(pool_mnt_pt)
            e_msg = ('Problem while probing for the raid level of the pool.'
                     'Cannot automatically import the pool with uuid: %s' %
                     uuid)
            handle_exception(Exception(e_msg), request)

        # Check for shares in the pool: every subvol that is not a snapshot
        # is treated as a share, mapped name -> subvol id.
        # NOTE(review): the error output 'e' returned here is shadowed by the
        # loop variable 'e' in the entries loop below - confirm intentional.
        subvols, e, rc = subvol_list_helper(pool_mnt_pt)
        snap_list = snapshot_list(pool_mnt_pt)
        share_map = {}
        for s in subvols:
            # Last whitespace field is the subvol name; second is its id.
            s_fields = s.split()
            if (s_fields[-1] not in snap_list):
                share_map[s_fields[-1]] = s_fields[1]

        # Refuse to import unless the pool top level holds only known subvols.
        entries = os.listdir(pool_mnt_pt)
        e_msg_prefix = ('Only btrfs filesystem with nothing but subvolumes in '
                        'it can be imported.')
        for e in entries:
            if (os.path.isfile('%s/%s' % (pool_mnt_pt, e))):
                e_msg = ('%s Unexpected file %s found. Due to this reason, '
                         'pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)
            elif (e not in share_map):
                e_msg = ('%s Unexpected directory %s found. Due to this '
                         'reason, pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)

        # Add the pool db model.
        pool_size = self._pool_size(disks, raid_level)
        p = Pool(name=pname, raid=raid_level, size=pool_size, uuid=uuid)
        p.save()

        # Import the shares found above.
        for s in share_map.keys():
            # NOTE(review): size=qgroup_size - undefined in this method;
            # presumably the per-share qgroup usage - confirm.
            so = Share(pool=p, qgroup='0/%s' % share_map[s], name=s,
                       size=qgroup_size, subvol_name=s, replica=False)
            so.save()

            # Import snapshots whose names follow the '<share>_<snap>' scheme.
            for snap in snap_list:
                snap_fields = snap.split('_')
                snap_name = snap_fields[-1]
                sname = '_'.join(snap_fields[0:-1])
                if (sname == s):
                    # NOTE(review): qgroup=qgroup_id - undefined in this
                    # method - confirm it resolves in the enclosing scope.
                    snapo = Snapshot(share=so, name=snap_name,
                                     real_name=snap, qgroup=qgroup_id)
                    snapo.save()
Beispiel #18
0
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtupil collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is only known external unique entry, scan_disks
        # make this so in the case of empty or repeat entries by providing
        # fake serial numbers which are in turn flagged via WebUI as unreliable.
        # 1) scrub all device names with unique but nonsense uuid4
        # 1) mark all offline disks as such via db flag
        # 2) mark all offline disks smart available and enabled flags as False
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (re.match(
                    'fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # if these 2 strings are found then ignore them as we then
                # overwrite with our current finding and in the new json format.
                # I.e. non None could also be a legacy entry so follow overwrite
                # path when legacy entry found by treating as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now )so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via it's label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                #this is for backwards compatibility. root pools created
                #before the pool.role migration need this. It can safely be
                #removed a few versions after 3.8-11 or when we reset migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via it's label
                dob.pool = None
            # If no pool has yet been found with this disk's label in and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.save()
                p.disk_set.add(dob)
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = p.usage_bound()
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception, e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
Beispiel #19
0
    def test_create(self, mock_pool):
        """
        Test POST request to create shares.
        1. Create share on a nonexistent pool
        2. Create share on root pool
        3. Create share with invalid compression
        4. Create share with invalid sizes
        5. Create share with duplicate names
        6. Create share with valid replica
        7. Create share with invalid replica
        8. Create share with share size > pool size

        N.B. the Pool model is patched in (mock_pool), so each section below
        re-programs its side_effect / return_value to simulate the db state
        that section needs. Section order therefore matters.
        Validation failures surface as HTTP 500 via handle_exception, with
        the error message as the first element of response.data.
        """

        # mock_pool.objects.filter.return_value.exists.return_value = False
        # Simulate "no such pool in db": any Pool.objects.get() raises.
        mock_pool.objects.get.side_effect = Pool.DoesNotExist

        # create a share on a pool that does not exist
        data = {
            'sname': 'rootshare',
            'pool': 'does_not_exist',
            'size': 1048576
        }
        e_msg = 'Pool (does_not_exist) does not exist.'
        response = self.client.post(self.BASE_URL, data=data)
        self.assertEqual(response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response.data)
        self.assertEqual(response.data[0], e_msg)

        # Switch the mock to return a concrete pool for subsequent sections.
        mock_pool.objects.get.side_effect = None
        temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
        mock_pool.objects.get.return_value = temp_pool
        # Mock exists to return false. Avoids all possible share names taken:
        # i.e. 'A pool with this name (rootshare) exists. Share and pool
        # names must be distinct. ...'
        # from: "Pool.objects.filter(name=sname).exists()"
        mock_pool.objects.filter.return_value.exists.return_value = False

        # create a share on root pool
        data['pool'] = 'rockstor_rockstor'
        response2 = self.client.post(self.BASE_URL, data=data)
        self.assertEqual(response2.status_code,
                         status.HTTP_200_OK,
                         msg=response2.data)
        self.assertEqual(response2.data['name'], 'rootshare')

        # create a share with invalid compression
        data['compression'] = 'invalid'
        e_msg2 = ("Unsupported compression algorithm (invalid). Use one of "
                  "('lzo', 'zlib', 'no').")
        response3 = self.client.post(self.BASE_URL, data=data)
        self.assertEqual(response3.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response3.data)
        self.assertEqual(response3.data[0], e_msg2)

        # create a share with invalid size (too small)
        data2 = {'sname': 'too_small', 'pool': 'rockstor_rockstor', 'size': 1}
        e_msg3 = 'Share size should be at least 100 KB. Given size is 1 KB.'
        response4 = self.client.post(self.BASE_URL, data=data2)
        self.assertEqual(response4.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response4.data)
        self.assertEqual(response4.data[0], e_msg3)

        # create a share with invalid size (non integer)
        data2['size'] = 'non int'
        e_msg4 = 'Share size must be an integer.'
        response5 = self.client.post(self.BASE_URL, data=data2)
        self.assertEqual(response5.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response5.data)
        self.assertEqual(response5.data[0], e_msg4)

        # From here on, pretend a pool with the requested name exists so the
        # name-clash validations fire.
        mock_pool.objects.filter.return_value.exists.return_value = True

        # create share with same name as a pool that already exists
        data3 = {
            'sname': 'rockstor_rockstor',
            'pool': 'rockstor_rockstor',
            'size': 1048576
        }
        e_msg5 = ('A pool with this name (rockstor_rockstor) exists. Share '
                  'and pool names must be distinct. Choose '
                  'a different name.')
        response6 = self.client.post(self.BASE_URL, data=data3)
        self.assertEqual(response6.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response6.data)
        self.assertEqual(response6.data[0], e_msg5)

        # create share with name that already exists
        # ('rootshare' was created earlier in this test)
        data3['sname'] = 'rootshare'
        e_msg6 = 'Share (rootshare) already exists. Choose a different name.'
        response7 = self.client.post(self.BASE_URL, data=data3)
        self.assertEqual(response7.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response7.data)
        self.assertEqual(response7.data[0], e_msg6)

        # Back to "no clashing pool name" for the remaining sections.
        mock_pool.objects.filter.return_value.exists.return_value = False

        # create share with valid replica
        data4 = {
            'sname': 'valid_replica',
            'pool': 'rockstor_rockstor',
            'size': 100,
            'replica': True
        }
        response8 = self.client.post(self.BASE_URL, data=data4)
        self.assertEqual(response8.status_code,
                         status.HTTP_200_OK,
                         msg=response8.data)
        self.assertEqual(response8.data['name'], 'valid_replica')
        self.assertEqual(response8.data['replica'], True)

        # create share with invalid replica (non-boolean value)
        data5 = {
            'sname': 'invalid_replica',
            'pool': 'rockstor_rockstor',
            'size': 100,
            'replica': 'non-bool'
        }
        e_msg7 = "Replica must be a boolean, not (<type 'unicode'>)."
        response9 = self.client.post(self.BASE_URL, data=data5)
        self.assertEqual(response9.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response9.data)
        self.assertEqual(response9.data[0], e_msg7)

        # create share with size > pool size:
        # expected to succeed with the size capped at the pool's size.
        data6 = {
            'sname': 'too_big',
            'pool': 'rockstor_rockstor',
            'size': 10000000000000
        }
        response10 = self.client.post(self.BASE_URL, data=data6)
        self.assertEqual(response10.status_code,
                         status.HTTP_200_OK,
                         msg=response10.data)
        self.assertEqual(response10.data['name'], 'too_big')
        pool = Pool.objects.get(name=data6['pool'])
        self.assertEqual(response10.data['size'], pool.size)
Beispiel #20
0
    def post(self, request):
        """
        Create a new btrfs pool from a POST request.

        Expects 'disks' (list of disk names), 'pname' (pool name) and
        'raid_level' in the request payload, plus optional compression and
        mount options. Validates the name, the disk set and the
        raid-level/disk-count combination, then creates the Pool db entry
        and the on-disk filesystem, and returns the serialized pool.
        Validation failures are reported via handle_exception.
        """
        with self._handle_exception(request):
            # Resolve every submitted disk name to its validated db object.
            disks = []
            for disk_name in request.data.get('disks'):
                disks.append(self._validate_disk(disk_name, request))
            pname = request.data['pname']
            # Pool names are restricted to a safe character set.
            if re.match('%s$' % settings.POOL_REGEX, pname) is None:
                err = ('Invalid characters in pool name. Following '
                       'characters are allowed: letter(a-z or A-Z), '
                       'digit(0-9), '
                       'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(err), request)

            if len(pname) > 255:
                err = 'Pool name must be less than 255 characters.'
                handle_exception(Exception(err), request)

            if Pool.objects.filter(name=pname).exists():
                err = ('Pool ({}) already exists. '
                       'Choose a different name.').format(pname)
                handle_exception(Exception(err), request)

            # Pool and share names must not collide.
            if Share.objects.filter(name=pname).exists():
                err = ('A share with this name ({}) exists. Pool and share '
                       'names must be distinct. '
                       'Choose a different name.').format(pname)
                handle_exception(Exception(err), request)

            # Refuse any disk already carrying a btrfs filesystem.
            for disk in disks:
                if disk.btrfs_uuid is not None:
                    err = ('Another BTRFS filesystem exists on this '
                           'disk ({}). '
                           'Erase the disk and try again.').format(disk.name)
                    handle_exception(Exception(err), request)

            raid_level = request.data['raid_level']
            if raid_level not in self.RAID_LEVELS:
                err = ('Unsupported raid level. Use one of: '
                       '{}.').format(self.RAID_LEVELS)
                handle_exception(Exception(err), request)
            # Enforce per-raid-level minimum disk counts; the index order of
            # RAID_LEVELS is defined on the class (raid0 & raid1 checked
            # together below).
            if raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1:
                err = ('At least 2 disks are required for the raid level: '
                       '{}.').format(raid_level)
                handle_exception(Exception(err), request)
            if raid_level == self.RAID_LEVELS[3] and len(disks) < 4:
                err = ('A minimum of 4 drives are required for the '
                       'raid level: {}.').format(raid_level)
                handle_exception(Exception(err), request)
            if raid_level == self.RAID_LEVELS[4] and len(disks) < 2:
                err = ('2 or more disks are required for the raid '
                       'level: {}.').format(raid_level)
                handle_exception(Exception(err), request)
            if raid_level == self.RAID_LEVELS[5] and len(disks) < 3:
                err = ('3 or more disks are required for the raid '
                       'level: {}.').format(raid_level)
                handle_exception(Exception(err), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = self._role_filter_disk_names(disks, request)
            pool = Pool(name=pname, raid=raid_level, compression=compression,
                        mnt_options=mnt_options)
            pool.save()
            pool.disk_set.add(*disks)
            # Explicitly persist pool membership on each disk as
            # disk_set.add(*disks) alone was observed not to save disks in
            # the test environment.
            for disk in disks:
                disk.pool = pool
                disk.save()
            add_pool(pool, dnames)
            pool.size = pool.usage_bound()
            pool.uuid = btrfs_uuid(dnames[0])
            pool.save()
            # Ensure udev info is updated via a system wide trigger, as per
            # pool resize add, only here it is for a new pool.
            trigger_udev_update()
            return Response(PoolInfoSerializer(pool).data)
Beispiel #21
0
    def test_resize(self, mock_pool):
        """
        Test PUT request to update size of share.
        1. Create valid share
        2. Valid resize
        3. Resize nonexistent share
        4. Resize share below current usage value
        5. Resize share below minimum 100KB

        N.B. the Pool model is patched in (mock_pool); it is programmed
        below so share creation succeeds against a fake root pool.
        Validation failures surface as HTTP 500 via handle_exception, with
        the error message as the first element of response.data.
        """

        # Have Pool.objects.get() return a concrete fake pool so the share
        # can be created on it.
        mock_pool.objects.get.side_effect = None
        temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
        mock_pool.objects.get.return_value = temp_pool
        # Mock exists to return false. Avoids all possible share names taken:
        # i.e. 'A pool with this name (rootshare) exists. Share and pool
        # names must be distinct. ...'
        # from: "Pool.objects.filter(name=sname).exists()"
        mock_pool.objects.filter.return_value.exists.return_value = False

        # create new share to be resized below
        data = {'sname': 'share2', 'pool': 'rockstor_rockstor', 'size': 1000}
        response = self.client.post(self.BASE_URL, data=data)
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK,
                         msg=response.data)
        self.assertEqual(response.data['name'], 'share2')
        self.assertEqual(response.data['size'], 1000)
        # Shares are addressed by db id in the PUT url, so look it up.
        share = Share.objects.get(name='share2')
        sId = share.id

        # resize share (valid: grow from 1000 to 2000)
        data3 = {
            'size': 2000,
        }
        response3 = self.client.put('{}/{}'.format(self.BASE_URL, sId),
                                    data=data3)
        self.assertEqual(response3.status_code,
                         status.HTTP_200_OK,
                         msg=response3.data)
        self.assertEqual(response3.data['size'], 2000)

        # TODO: Needs revisiting
        # # resize the 'root' share
        # # in test_shares.json as id=1 name='root'
        # data3 = {'size': 1500}
        # response3 = self.client.put('%s/1' % self.BASE_URL, data=data3)
        # self.assertEqual(response3.status_code,
        #                  status.HTTP_500_INTERNAL_SERVER_ERROR,
        #                  msg=response3.data)
        # e_msg = ('Operation not permitted on this share (root) because it is '
        #          'a special system share.')
        # self.assertEqual(response3.data[0], e_msg)
        #
        # # resize a 'home' share
        # data3 = {'size': 1500}
        # response3 = self.client.put('%s/home' % self.BASE_URL, data=data3)
        # self.assertEqual(response3.status_code,
        #                  status.HTTP_500_INTERNAL_SERVER_ERROR,
        #                  msg=response3.data)
        # e_msg = ('Operation not permitted on this share (home) because it is '
        #          'a special system share.')
        # self.assertEqual(response3.data[0], e_msg)

        # resize to below current share usage value
        # (usage is presumably mocked at 500 KB elsewhere in this test
        # class — confirm against the fixture/setUp)
        data3 = {'size': 400}
        response3 = self.client.put('{}/{}'.format(self.BASE_URL, sId),
                                    data=data3)
        self.assertEqual(response3.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response3.data)
        e_msg = ('Unable to resize because requested new size 400 KB is less '
                 'than current usage 500 KB of the share.')
        self.assertEqual(response3.data[0], e_msg)

        # TODO: needs revisiting
        # # resize below 100KB
        # self.mock_share_usage.return_value = 50
        # data3 = {'size': 99}
        # response3 = self.client.put('%s/share1' % self.BASE_URL, data=data3)
        # self.assertEqual(response3.status_code,
        #                  status.HTTP_500_INTERNAL_SERVER_ERROR,
        #                  msg=response3.data)
        # e_msg = 'Share size should be at least 100 KB. Given size is 99 KB.'
        # self.assertEqual(response3.data[0], e_msg)

        # resize a share that doesn't exist (invalid id in url)
        sId_invalid = 99999
        data3 = {'sname': 'invalid', 'size': 1500}
        response3 = self.client.put('{}/{}'.format(self.BASE_URL, sId_invalid),
                                    data=data3)
        self.assertEqual(response3.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response3.data)
        e_msg = 'Share id ({}) does not exist.'.format(sId_invalid)
        self.assertEqual(response3.data[0], e_msg)
Beispiel #22
0
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtupil collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is only known external unique entry, scan_disks
        # make this so in the case of empty or repeat entries by providing
        # fake serial numbers which are in turn flagged via WebUI as unreliable.
        # 1) scrub all device names with unique but nonsense uuid4
        # 1) mark all offline disks as such via db flag
        # 2) mark all offline disks smart available and enabled flags as False
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (
                    re.match('fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # if these 2 strings are found then ignore them as we then
                # overwrite with our current finding and in the new json format.
                # I.e. non None could also be a legacy entry so follow overwrite
                # path when legacy entry found by treating as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now )so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via it's label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                #this is for backwards compatibility. root pools created
                #before the pool.role migration need this. It can safely be
                #removed a few versions after 3.8-11 or when we reset migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via it's label
                dob.pool = None
            # If no pool has yet been found with this disk's label in and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception, e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
Beispiel #23
0
    def test_compression(self, mock_pool):
        """
        Test PUT request to update share compression_algo
        1. Create a share with invalid compression
        2. Create a share with zlib compression
        3. Create a share with lzo compression
        4. change compression from zlib to lzo
        5. change compression from lzo to zlib
        6. disable zlib, enable zlib
        7. disable lzo, enable lzo
        """

        mock_pool.objects.get.side_effect = None
        fake_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
        mock_pool.objects.get.return_value = fake_pool

        # Mock exists to return false. Avoids all possible share names taken:
        # i.e. 'A pool with this name (rootshare) exists. Share and pool
        # names must be distinct. ...'
        # from: "Pool.objects.filter(name=sname).exists()"
        mock_pool.objects.filter.return_value.exists.return_value = False

        def assert_compression_change(share_id, algo):
            # PUT the requested algo and verify both the http status and
            # the resulting compression_algo reported back.
            resp = self.client.put('{}/{}'.format(self.BASE_URL, share_id),
                                   data={'compression': algo})
            self.assertEqual(resp.status_code,
                             status.HTTP_200_OK,
                             msg=resp.data)
            self.assertEqual(resp.data['compression_algo'], algo)

        # create share with invalid compression
        payload = {
            'sname': 'rootshare',
            'pool': 'rockstor_rockstor',
            'size': 100,
            'compression': 'derp'
        }
        e_msg = ("Unsupported compression algorithm (derp). "
                 "Use one of ('lzo', 'zlib', 'no').")
        bad_response = self.client.post(self.BASE_URL, data=payload)
        self.assertEqual(bad_response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=bad_response.data)
        self.assertEqual(bad_response.data[0], e_msg)

        # create share with zlib compression (reuse the payload dict)
        payload['compression'] = 'zlib'
        zlib_response = self.client.post(self.BASE_URL, data=payload)
        self.assertEqual(zlib_response.status_code,
                         status.HTTP_200_OK,
                         msg=zlib_response.data)
        self.assertEqual(zlib_response.data['compression_algo'], 'zlib')
        share_id = Share.objects.get(name='rootshare').id

        # change compression from zlib to lzo
        assert_compression_change(share_id, 'lzo')

        # create share with lzo compression
        second_payload = {
            'sname': 'share2',
            'pool': 'rockstor_rockstor',
            'size': 100,
            'compression': 'lzo'
        }
        second_response = self.client.post(self.BASE_URL, data=second_payload)
        self.assertEqual(second_response.status_code,
                         status.HTTP_200_OK,
                         msg=second_response.data)
        self.assertEqual(second_response.data['compression_algo'], 'lzo')

        # change compression from lzo to zlib, then toggle each algorithm
        # off (via 'no') and back on: zlib off/on, lzo off (implicit via
        # 'no') and back to lzo.
        for algo in ('zlib', 'no', 'zlib', 'no', 'lzo'):
            assert_compression_change(share_id, algo)
Beispiel #24
0
    def test_delete_set1(self, mock_snapshot, mock_nfs, mock_samba, mock_sftp,
                         mock_pool):
        """
        Test DELETE request on share
        1. Create valid share
        2. Delete share with replication related snapshots
        3. Delete share with NFS export
        4. Delete share that is shared via Samba
        6. Delete share with SFTP export
        8. Delete nonexistent share
        """

        mock_pool.objects.get.side_effect = None
        temp_pool = Pool(id=1, name='rockstor_rockstor', size=88025459)
        mock_pool.objects.get.return_value = temp_pool

        # Mock exists to return false. Avoids all possible share names taken:
        # i.e. 'A pool with this name (rootshare) exists. Share and pool
        # names must be distinct. ...'
        # from: "Pool.objects.filter(name=sname).exists()"
        mock_pool.objects.filter.return_value.exists.return_value = False

        # create share
        data = {'sname': 'rootshare', 'pool': 'rockstor_rockstor', 'size': 100}
        response = self.client.post(self.BASE_URL, data=data)
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK,
                         msg=response.data)
        self.assertEqual(response.data['name'], 'rootshare')
        share = Share.objects.get(name='rootshare')
        sId = share.id

        # Delete share with replication related snapshots
        # TODO: check not false positive (see: test_delete_share_with_snapshot)
        mock_snapshot.objects.filter(
            share=share, snap_type='replication').exists.return_value = True
        e_msg = ('Share (rootshare) cannot be deleted as it has replication '
                 'related snapshots.')
        response2 = self.client.delete('{}/{}'.format(self.BASE_URL, sId))
        self.assertEqual(response2.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response2.data)
        self.assertEqual(response2.data[0], e_msg)
        mock_snapshot.objects.filter(
            share=share, snap_type='replication').exists.return_value = False

        # Delete share with NFS export
        mock_nfs.objects.filter(share=share).exists.return_value = True
        e_msg = ('Share (rootshare) cannot be deleted as it is exported via '
                 'NFS. Delete NFS exports and try again.')
        response3 = self.client.delete('{}/{}'.format(self.BASE_URL, sId))
        self.assertEqual(response3.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response3.data)
        self.assertEqual(response3.data[0], e_msg)
        mock_nfs.objects.filter(share=share).exists.return_value = False

        # Delete share that is shared via Samba
        mock_samba.objects.filter(share=share).exists.return_value = True
        e_msg = ('Share (rootshare) cannot be deleted as it is shared via '
                 'Samba. Unshare and try again.')
        response4 = self.client.delete('{}/{}'.format(self.BASE_URL, sId))
        self.assertEqual(response4.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response4.data)
        self.assertEqual(response4.data[0], e_msg)
        mock_samba.objects.filter(share=share).exists.return_value = False

        # Delete share with SFTP export
        mock_sftp.objects.filter(share=share).exists.return_value = True
        e_msg = ('Share (rootshare) cannot be deleted as it is exported via '
                 'SFTP. Delete SFTP export and try again.')
        response6 = self.client.delete('{}/{}'.format(self.BASE_URL, sId))
        self.assertEqual(response6.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response6.data)
        self.assertEqual(response6.data[0], e_msg)
        mock_sftp.objects.filter(share=share).exists.return_value = False

        # delete a share that doesn't exist
        sId_fake = 99999
        e_msg = 'Share id ({}) does not exist.'.format(sId_fake)
        response9 = self.client.delete('{}/{}'.format(self.BASE_URL, sId_fake))
        self.assertEqual(response9.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR,
                         msg=response9.data)
        self.assertEqual(response9.data[0], e_msg)
Beispiel #25
0
    def post(self, request):
        """
        Create a new btrfs pool.

        Request data carries 'disks' (list of disk names), 'pname' (pool
        name) and 'raid_level'; compression and mount options are optional
        and validated by helpers. On success the pool is created on disk,
        mounted, persisted and returned serialized. All validation
        failures are surfaced via handle_exception().
        """
        with self._handle_exception(request):
            # Resolve each submitted disk name to a validated Disk db object.
            member_disks = [
                self._validate_disk(d, request)
                for d in request.data.get("disks")
            ]
            pool_name = request.data["pname"]
            # Enforce the allowed pool-name character set.
            if re.match("%s$" % settings.POOL_REGEX, pool_name) is None:
                e_msg = (
                    "Pool name must start with a alphanumeric(a-z0-9) "
                    "character and can be followed by any of the "
                    "following characters: letter(a-z), digits(0-9), "
                    "hyphen(-), underscore(_) or a period(.)."
                )
                handle_exception(Exception(e_msg), request)

            # Pool names must be unique against both pools and shares.
            if Pool.objects.filter(name=pool_name).exists():
                e_msg = "Pool(%s) already exists. Choose a different name" % pool_name
                handle_exception(Exception(e_msg), request)

            if Share.objects.filter(name=pool_name).exists():
                e_msg = (
                    "A Share with this name(%s) exists. Pool and Share names "
                    "must be distinct. Choose a different name" % pool_name
                )
                handle_exception(Exception(e_msg), request)

            # Refuse disks that already host a btrfs filesystem.
            for member in member_disks:
                if member.btrfs_uuid is not None:
                    e_msg = (
                        "Another BTRFS filesystem exists on this disk(%s). "
                        "Erase the disk and try again." % member.name
                    )
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data["raid_level"]
            if raid_level not in self.RAID_LEVELS:
                e_msg = "Unsupported raid level. use one of: {}".format(
                    self.RAID_LEVELS
                )
                handle_exception(Exception(e_msg), request)
            # Enforce per-profile minimum member counts.
            # NOTE(review): index positions assumed to map to raid0/raid1
            # ([1:3]), raid10 ([3]), raid5 ([4]) and raid6 ([5]) — confirm
            # against the RAID_LEVELS definition.
            disk_count = len(member_disks)
            if raid_level in self.RAID_LEVELS[1:3] and disk_count <= 1:
                e_msg = (
                    "At least two disks are required for the raid level: %s"
                    % raid_level
                )
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[3]:
                if disk_count < 4:
                    e_msg = (
                        "A minimum of Four drives are required for the "
                        "raid level: %s" % raid_level
                    )
                    handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[4] and disk_count < 2:
                e_msg = (
                    "Two or more disks are required for the raid level: %s"
                    % raid_level
                )
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[5] and disk_count < 3:
                e_msg = (
                    "Three or more disks are required for the raid level: %s"
                    % raid_level
                )
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            disk_names = [disk.name for disk in member_disks]
            pool = Pool(
                name=pool_name,
                raid=raid_level,
                compression=compression,
                mnt_options=mnt_options,
            )
            # Create the btrfs pool on disk first, then record size/uuid
            # from the mounted filesystem before persisting.
            add_pool(pool, disk_names)
            pool.size = pool_usage(mount_root(pool, disk_names[0]))[0]
            pool.uuid = btrfs_uuid(disk_names[0])
            pool.disk_set.add(*member_disks)
            pool.save()
            # added for loop to save disks
            # appears disk_set.add(*disks) was not saving disks in test environment
            for member in member_disks:
                member.pool = pool
                member.save()
            return Response(PoolInfoSerializer(pool).data)
Beispiel #26
0
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.

        Validates the pool name (character set, length, uniqueness against
        both Pool and Share names), rejects member disks that already carry
        a btrfs filesystem, enforces the minimum disk count for the
        requested raid level, then creates and mounts the pool and returns
        its serialized representation. Validation failures are surfaced
        via handle_exception().
        """
        with self._handle_exception(request):
            # Resolve each submitted disk name into a validated Disk db
            # object via the helper.
            disks = [self._validate_disk(d, request) for d in
                     request.data.get('disks')]
            pname = request.data['pname']
            # Enforce the allowed pool-name character set.
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with a alphanumeric(a-z0-9) '
                         'character and can be followed by any of the '
                         'following characters: letter(a-z), digits(0-9), '
                         'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            # Length cap — presumably matches the db field limit; TODO confirm.
            if (len(pname) > 255):
                e_msg = ('Pool name must be less than 255 characters')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool(%s) already exists. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            # Reject clashes with existing Share names as well.
            if (Share.objects.filter(name=pname).exists()):
                e_msg = ('A Share with this name(%s) exists. Pool and Share names '
                         'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            # Refuse disks that already host a btrfs filesystem.
            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.'
                             % d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: {}'.format(self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid 1 disk check
            # NOTE(review): index positions assumed to map to raid0/raid1
            # ([1:3]), raid10 ([3]), raid5 ([4]) and raid6 ([5]) — confirm
            # against the RAID_LEVELS definition.
            if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname, raid=raid_level, compression=compression,
                     mnt_options=mnt_options)
            # Persist the Pool and its disk membership before the on-disk
            # pool creation below (ordering differs from the sibling
            # variant of this method, which calls add_pool first).
            p.disk_set.add(*disks)
            p.save()
            # added for loop to save disks
            # appears p.disk_set.add(*disks) was not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            # Create the on-disk btrfs pool, mount it, and record the
            # resulting size and uuid before the final save.
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            return Response(PoolInfoSerializer(p).data)
Beispiel #27
0
    def post(self, request, uuid):
        """
        import a pool with given uuid
        """
        disks = Disk.objects.filter(btrfs_uuid=uuid)

        if (not btrfs_importable(disks[0].name)):
            e_msg = ('btrfs check failed on device: %s Cannot automatically '
                     'import the pool with uuid: %s' % (disks[0].name, uuid))
            handle_exception(Exception(e_msg), request)

        #get name of the pool
        pname = btrfs_label(uuid)

        #mount the pool
        mount_root(pname, '/dev/%s' % disks[0].name)
        pool_mnt_pt = '%s/%s' % (settings.MNT_PT, pname)

        #get raid level
        raid_level = btrfs_raid_level(pname)
        if (raid_level is None):
            umount_root(pool_mnt_pt)
            e_msg = ('Problem while probing for the raid level of the pool.'
                     'Cannot automatically import the pool with uuid: %s' %
                     uuid)
            handle_exception(Exception(e_msg), request)

        #check for shares in the pool
        subvols, e, rc = subvol_list_helper(pool_mnt_pt)
        snap_list = snapshot_list(pool_mnt_pt)
        share_map = {}
        for s in subvols:
            s_fields = s.split()
            if (s_fields[-1] not in snap_list):
                share_map[s_fields[-1]] = s_fields[1]

        entries = os.listdir(pool_mnt_pt)
        e_msg_prefix = ('Only btrfs filesystem with nothing but subvolumes in '
                        'it can be imported.')
        for e in entries:
            if (os.path.isfile('%s/%s' % (pool_mnt_pt, e))):
                e_msg = ('%s Unexpected file %s found. Due to this reason, '
                         'pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)
            elif (e not in share_map):
                e_msg = ('%s Unexpected directory %s found. Due to this '
                         'reason, pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)

        #add pool model
        pool_size = self._pool_size(disks, raid_level)
        p = Pool(name=pname, raid=raid_level, size=pool_size, uuid=uuid)
        p.save()

        #import shares
        for s in share_map.keys():
            so = Share(pool=p,
                       qgroup='0/%s' % share_map[s],
                       name=s,
                       size=qgroup_size,
                       subvol_name=s,
                       replica=False)
            so.save()

            #import snapshots?
            for snap in snap_list:
                snap_fields = snap.split('_')
                snap_name = snap_fields[-1]
                sname = '_'.join(snap_fields[0:-1])
                if (sname == s):
                    snapo = Snapshot(share=so,
                                     name=snap_name,
                                     real_name=snap,
                                     qgroup=qgroup_id)
                    snapo.save()