Example #1
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.parted = False
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #2
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' %
                          p.name)
             continue
         try:
             mount_root(p)
             first_attached_dev = p.disk_set.attached().first()
             # Observe any redirect role by using target_name.
             pool_info = get_pool_info(first_attached_dev.target_name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #3
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.parted = False
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #4
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             first_dev = p.disk_set.first()
             first_dev_name = first_dev.name
             # if we are looking at a device with a redirect role then
             # redirect accordingly.
             if first_dev.role is not None:
                 disk_role_dict = json.loads(first_dev.role)
                 if 'redirect' in disk_role_dict:
                     # consider replacing None with first_dev.name
                     first_dev_name = disk_role_dict.get('redirect', None)
             pool_info = get_pool_info(first_dev_name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
Example #5
 def _refresh_pool_state():
     # Get map of dm-0 to /dev/mapper members ie luks-.. devices.
     mapped_devs = get_device_mapper_map()
     # Get temp_names (kernel names) to btrfs pool info for attached devs.
     dev_pool_info = get_dev_pool_info()
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if p.disk_set.count() == 0:
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error("Skipping Pool ({}) mount as there "
                          "are no attached devices. Moving on.".format(
                              p.name))
             continue
         # If pool has no missing remove all detached disk pool associations.
         # Accounts for 'end of run' clean-up in removing a detached disk and for cli
         # maintenance re pool returned to no missing dev status. Also re-establishes
         # pool info as source of truth re missing.
         if not p.has_missing_dev:
             for disk in p.disk_set.filter(name__startswith="detached-"):
                 logger.info(
                     "Removing detached disk from Pool {}: no missing "
                     "devices found.".format(p.name))
                 disk.pool = None
                 disk.save()
         try:
             # Get and save what info we can prior to mount.
             first_dev = p.disk_set.attached().first()
             # Use target_name to account for redirect role.
             if first_dev.target_name == first_dev.temp_name:
                 logger.error(
                     "Skipping pool ({}) mount as attached disk "
                     "({}) has no by-id name (no serial # ?)".format(
                         p.name, first_dev.target_name))
                 continue
             if first_dev.temp_name in mapped_devs:
                 dev_tmp_name = "/dev/mapper/{}".format(
                     mapped_devs[first_dev.temp_name])
             else:
                 dev_tmp_name = "/dev/{}".format(first_dev.temp_name)
             # For now we call get_dev_pool_info() once for each pool.
             pool_info = dev_pool_info[dev_tmp_name]
             p.name = pool_info.label
             p.uuid = pool_info.uuid
             p.save()
             mount_root(p)
             p.raid = pool_raid(p.mnt_pt)["data"]
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error("Exception while refreshing state for "
                          "Pool({}). Moving on: {}".format(
                              p.name, e.__str__()))
             logger.exception(e)
Example #6
 def _refresh_pool_state(self, pool):
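     # Mount the pool via its first member disk, then refresh its name, raid
     # level, and size from the on-disk btrfs info before saving.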
     dname = Disk.objects.filter(pool=pool)[0].name
     mount_root(pool, dname)
     pool_info = get_pool_info(dname)
     pool.name = pool_info['label']
     pool.raid = pool_raid('%s%s' % (settings.MNT_PT, pool.name))['data']
     pool.size = pool_usage('%s%s' % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #7
 def _refresh_pool_state(self, pool):
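     # Delete the pool if it no longer has any member disks; otherwise mount
     # it and refresh name, raid level, and size from the on-disk btrfs info.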
     fd = pool.disk_set.first()
     if fd is None:
         return pool.delete()
     mount_root(pool, fd.name)
     pool_info = get_pool_info(fd.name)
     pool.name = pool_info["label"]
     pool.raid = pool_raid("%s%s" % (settings.MNT_PT, pool.name))["data"]
     pool.size = pool_usage("%s%s" % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #8
 def _refresh_pool_state(self, pool):
     fd = pool.disk_set.first()
     if (fd is None):
         return pool.delete()
     mount_root(pool, fd.name)
     pool_info = get_pool_info(fd.name)
     pool.name = pool_info['label']
     pool.raid = pool_raid('%s%s' % (settings.MNT_PT, pool.name))['data']
     pool.size = pool_usage('%s%s' % (settings.MNT_PT, pool.name))[0]
     pool.save()
     return pool
Example #9
 def _refresh_pool_state():
     # Get map of dm-0 to /dev/mapper members ie luks-.. devices.
     mapped_devs = get_device_mapper_map()
     # Get temp_names (kernel names) to btrfs pool info for attached devs.
     dev_pool_info = get_dev_pool_info()
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if p.disk_set.count() == 0:
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error("Skipping Pool ({}) mount as there "
                          "are no attached devices. Moving on.".format(
                              p.name))
             continue
         try:
             # Get and save what info we can prior to mount.
             first_dev = p.disk_set.attached().first()
             # Use target_name to account for redirect role.
             if first_dev.target_name == first_dev.temp_name:
                 logger.error(
                     "Skipping pool ({}) mount as attached disk "
                     "({}) has no by-id name (no serial # ?)".format(
                         p.name, first_dev.target_name))
                 continue
             if first_dev.temp_name in mapped_devs:
                 dev_tmp_name = "/dev/mapper/{}".format(
                     mapped_devs[first_dev.temp_name])
             else:
                 dev_tmp_name = "/dev/{}".format(first_dev.temp_name)
             # For now we call get_dev_pool_info() once for each pool.
             pool_info = dev_pool_info[dev_tmp_name]
             p.name = pool_info.label
             p.uuid = pool_info.uuid
             p.save()
             mount_root(p)
             p.raid = pool_raid(p.mnt_pt)["data"]
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error("Exception while refreshing state for "
                          "Pool({}). Moving on: {}".format(
                              p.name, e.__str__()))
             logger.exception(e)
Example #10
def main():
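    # For every pool, remove 0/<id> qgroups that no longer correspond to an
    # existing subvolume; pools without quotas enabled are skipped.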
    for p in Pool.objects.all():
        try:
            print("Processing pool(%s)" % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, "subvol", "list", mnt_pt])
            subvol_ids = []
            for l in o:
                if re.match("ID ", l) is not None:
                    subvol_ids.append(l.split()[1])

            o, e, rc = run_command([BTRFS, "qgroup", "show", mnt_pt], throw=False)
            if rc != 0:
                print("Quotas not enabled on pool(%s). Skipping it." % p.name)
                continue

            qgroup_ids = []
            for l in o:
                if re.match("0/", l) is not None:
                    q = l.split()[0].split("/")[1]
                    if q == "5":
                        continue
                    qgroup_ids.append(l.split()[0].split("/")[1])

            for q in qgroup_ids:
                if q not in subvol_ids:
                    print("qgroup %s not in use. deleting" % q)
                    run_command([BTRFS, "qgroup", "destroy", "0/%s" % q, mnt_pt])
                else:
                    print("qgroup %s is in use. Moving on." % q)
            print("Finished processing pool(%s)" % p.name)
        except Exception, e:
            print("Exception while qgroup-cleanup of Pool(%s): %s" % (p.name, e.__str__()))
Example #11
def main():
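    # For every pool with quotas enabled, collect the parent qgroup ids from
    # 'btrfs qgroup show -p' output and lift ('none') any size limits on them.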
    for p in Pool.objects.all():
        try:
            print("Processing pool(%s)" % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, "qgroup", "show", "-p", mnt_pt], throw=False)
            if rc != 0:
                print("Quotas not enabled on pool(%s). Skipping it." % p.name)
                continue

            qgroup_ids = []
            for l in o:
                if re.match("qgroupid", l) is not None or re.match("-------", l) is not None:
                    continue
                cols = l.strip().split()
                if len(cols) != 4:
                    print("Ignoring unexcepted line(%s)." % l)
                    continue
                if cols[3] == "---":
                    print("No parent qgroup for %s" % l)
                    continue
                qgroup_ids.append(cols[3])

            for q in qgroup_ids:
                print("relaxing the limit on qgroup %s" % q)
                run_command([BTRFS, "qgroup", "limit", "none", q, mnt_pt])

            print("Finished processing pool(%s)" % p.name)
        except Exception, e:
            print("Exception while qgroup-maxout of Pool(%s): %s" % (p.name, e.__str__()))
Example #12
def main():
    for p in Pool.objects.all():
        print('Processing pool(%s)' % p.name)
        mnt_pt = mount_root(p)
        o, e, rc = run_command([BTRFS, 'subvol', 'list', mnt_pt])
        subvol_ids = []
        for l in o:
            if (re.match('ID ', l) is not None):
                subvol_ids.append(l.split()[1])

        o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt], throw=False)
        if (rc != 0):
            print('Quotas not enabled on pool(%s). Skipping it.' % p.name)
            continue

        qgroup_ids = []
        for l in o:
            if (re.match('0/', l) is not None):
                q = l.split()[0].split('/')[1]
                if (q == '5'):
                    continue
                qgroup_ids.append(l.split()[0].split('/')[1])

        for q in qgroup_ids:
            if (q not in subvol_ids):
                print('qgroup %s not in use. deleting' % q)
                run_command([BTRFS, 'qgroup', 'destroy', '0/%s' % q, mnt_pt])
            else:
                print('qgroup %s is in use. Moving on.' % q)
        print('Finished processing pool(%s)' % p.name)
Example #13
def main():
    for p in Pool.objects.all():
        print('Processing pool(%s)' % p.name)
        mnt_pt = mount_root(p)
        o, e, rc = run_command([BTRFS, 'qgroup', 'show', '-p', mnt_pt],
                               throw=False)
        if (rc != 0):
            print('Quotas not enabled on pool(%s). Skipping it.' % p.name)
            continue

        qgroup_ids = []
        for l in o:
            if (re.match('qgroupid', l) is not None or
                re.match('-------', l) is not None):
                continue
            cols = l.strip().split()
            if (len(cols) != 4):
                print('Ignoring unexpected line(%s).' % l)
                continue
            if (cols[3] == '---'):
                print('No parent qgroup for %s' % l)
                continue
            qgroup_ids.append(cols[3])

        for q in qgroup_ids:
            print('relaxing the limit on qgroup %s' % q)
            run_command([BTRFS, 'qgroup', 'limit', 'none', q, mnt_pt])

        print('Finished processing pool(%s)' % p.name)
Example #14
def main():
    for p in Pool.objects.all():
        try:
            print("Processing pool(%s)" % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, "qgroup", "show", "-p", mnt_pt],
                                   throw=False)
            if rc != 0:
                print("Quotas not enabled on pool(%s). Skipping it." % p.name)
                continue

            qgroup_ids = []
            for l in o:
                if (re.match("qgroupid", l) is not None
                        or re.match("-------", l) is not None):
                    continue
                cols = l.strip().split()
                if len(cols) != 4:
                    print("Ignoring unexcepted line(%s)." % l)
                    continue
                if cols[3] == "---":
                    print("No parent qgroup for %s" % l)
                    continue
                qgroup_ids.append(cols[3])

            for q in qgroup_ids:
                print("relaxing the limit on qgroup %s" % q)
                run_command([BTRFS, "qgroup", "limit", "none", q, mnt_pt])

            print("Finished processing pool(%s)" % p.name)
        except Exception as e:
            print("Exception while qgroup-maxout of Pool(%s): %s" %
                  (p.name, e.__str__()))
Example #15
def main():
    for p in Pool.objects.all():
        print('Processing pool(%s)' % p.name)
        mnt_pt = mount_root(p)
        o, e, rc = run_command([BTRFS, 'subvol', 'list', mnt_pt])
        subvol_ids = []
        for l in o:
            if (re.match('ID ', l) is not None):
                subvol_ids.append(l.split()[1])

        o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt], throw=False)
        if (rc != 0):
            print('Quotas not enabled on pool(%s). Skipping it.' % p.name)
            continue

        qgroup_ids = []
        for l in o:
            if (re.match('0/', l) is not None):
                q = l.split()[0].split('/')[1]
                if (q == '5'):
                    continue
                qgroup_ids.append(l.split()[0].split('/')[1])

        for q in qgroup_ids:
            if (q not in subvol_ids):
                print('qgroup %s not in use. deleting' % q)
                run_command([BTRFS, 'qgroup', 'destroy', '0/%s' % q, mnt_pt])
            else:
                print('qgroup %s is in use. Moving on.' % q)
        print('Finished processing pool(%s)' % p.name)
Example #16
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             fd = p.disk_set.first()
             pool_info = get_pool_info(fd.name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             p.save()
         except Exception, e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
Example #17
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         disk_name = self._role_filter_disk_name(disk, request)
         p_info = get_pool_info(disk_name)
         # get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         # need to save it so disk objects get updated properly in the for
         # loop below.
         po.save()
         for device in p_info['disks']:
             disk_name, isPartition = \
                 self._reverse_role_filter_name(device, request)
             do = Disk.objects.get(name=disk_name)
             do.pool = po
             # update this disk's parted property
             do.parted = isPartition
             if isPartition:
                 # ensure a redirect role to reach this partition; ie:
                 # "redirect": "virtio-serial-3-part2"
                 if do.role is not None:  # db default is null / None.
                     # Get our previous roles into a dictionary
                     roles = json.loads(do.role)
                     # update or add our "redirect" role with our part name
                     roles['redirect'] = '%s' % device
                     # convert back to json and store in disk object
                     do.role = json.dumps(roles)
                 else:
                     # role=None so just add a json formatted redirect role
                     do.role = '{"redirect": "%s"}' % device.name
             do.save()
             mount_root(po)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = po.usage_bound()
         po.save()
         enable_quota(po)
         import_shares(po, request)
         for share in Share.objects.filter(pool=po):
             import_snapshots(share)
         return Response(DiskInfoSerializer(disk).data)
     except Exception as e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #18
    def post(self, request, command):
        if (command == 'bootstrap'):
            try:
                device_scan()
            except Exception, e:
                e_msg = ('Unable to scan disk drives on the system.')
                logger.error(e_msg)
                logger.exception(e)
                handle_exception(Exception(e_msg), request)

            for pool in Pool.objects.all():
                disk = Disk.objects.filter(pool=pool)[0].name
                try:
                    mount_root(pool, '/dev/%s' % disk)
                except Exception, e:
                    e_msg = ('Unable to mount a pool(%s) during bootstrap.'
                             % pool.name)
                    logger.exception(e)
Example #19
 def _refresh_pool_state():
     for p in Pool.objects.all():
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         try:
             mount_root(p)
             fd = p.disk_set.first()
             pool_info = get_pool_info(fd.name)
             p.name = pool_info['label']
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
             p.save()
         except Exception, e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #20
    def post(self, request, command):
        if (command == 'bootstrap'):
            try:
                device_scan()
            except Exception, e:
                e_msg = ('Unable to scan disk drives on the system.')
                logger.error(e_msg)
                logger.exception(e)
                handle_exception(Exception(e_msg), request)

            for pool in Pool.objects.all():
                disk = Disk.objects.filter(pool=pool)[0].name
                try:
                    mount_root(pool, '/dev/%s' % disk)
                except Exception, e:
                    e_msg = ('Unable to mount a pool(%s) during bootstrap.' %
                             pool.name)
                    logger.exception(e)
Example #21
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         #get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.save()
             mount_root(po, d)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                  % (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #22
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' %
                          p.name)
             continue
         try:
             # Get and save what info we can prior to mount.
             first_attached_dev = p.disk_set.attached().first()
             is_root_pool = (p.role == 'root')
             # Observe any redirect role by using target_name.
             byid_disk_name, is_byid = get_dev_byid_name(
                 get_device_path(first_attached_dev.target_name))
             if is_byid:
                 pool_info = get_pool_info(first_attached_dev.target_name,
                                           is_root_pool)
                 pool_name = pool_info['label']
             else:
                 logger.error('Skipping pool ({}) mount as attached disk '
                              '({}) has no by-id name (no serial # ?)'.
                              format(p.name,
                                     first_attached_dev.target_name))
                 continue
             p.name = pool_name
             p.save()
             mount_root(p)
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' %
                          (p.name, e.__str__()))
             logger.exception(e)
Example #23
 def _create_root_pool(self, d):
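     # Create the default root pool as 'single' raid, associate disk d with
     # it, then record usage, enable quotas, and store the btrfs uuid.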
     p = Pool(name=settings.ROOT_POOL, raid='single')
     p.disk_set.add(d)
     p.save()
     d.pool = p
     d.save()
     p.size = pool_usage(mount_root(p))[0]
     enable_quota(p)
     p.uuid = btrfs_uuid(d.name)
     p.save()
     return p
Example #24
 def _btrfs_disk_import(self, dname, request):
     try:
         disk = self._validate_disk(dname, request)
         p_info = get_pool_info(dname)
         #get some options from saved config?
         po = Pool(name=p_info['label'], raid="unknown")
         for d in p_info['disks']:
             do = Disk.objects.get(name=d)
             do.pool = po
             do.save()
             mount_root(po, d)
         po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
         po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
         po.save()
         return Response(DiskInfoSerializer(disk).data)
     except Exception, e:
         e_msg = (
             'Failed to import any pool on this device(%s). Error: %s' %
             (dname, e.__str__()))
         handle_exception(Exception(e_msg), request)
Example #25
 def _refresh_pool_state():
     for p in Pool.objects.all():
         # If our pool has no disks, detached included, then delete it.
         # We leave pools with all detached members in place intentionally.
         if (p.disk_set.count() == 0):
             p.delete()
             continue
         # Log if no attached members are found, ie all devs are detached.
         if p.disk_set.attached().count() == 0:
             logger.error('Skipping Pool (%s) mount as there '
                          'are no attached devices. Moving on.' % p.name)
             continue
         try:
             # Get and save what info we can prior to mount.
             first_attached_dev = p.disk_set.attached().first()
             is_root_pool = (p.role == 'root')
             # Observe any redirect role by using target_name.
             byid_disk_name, is_byid = get_dev_byid_name(
                 get_device_path(first_attached_dev.target_name))
             if is_byid:
                 pool_info = get_pool_info(first_attached_dev.target_name,
                                           is_root_pool)
                 pool_name = pool_info['label']
             else:
                 logger.error(
                     'Skipping pool ({}) mount as attached disk '
                     '({}) has no by-id name (no serial # ?)'.format(
                         p.name, first_attached_dev.target_name))
                 continue
             p.name = pool_name
             p.save()
             mount_root(p)
             p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
             p.size = p.usage_bound()
             # Consider using mount_status() parse to update root pool db on
             # active (fstab initiated) compression setting.
             p.save()
         except Exception as e:
             logger.error('Exception while refreshing state for '
                          'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
             logger.exception(e)
Example #26
    def post(self, request, command):
        if (command == 'bootstrap'):
            for pool in Pool.objects.all():
                try:
                    mount_root(pool)
                except Exception, e:
                    e_msg = ('Exception while mounting a pool(%s) during '
                             'bootstrap: %s' % (pool.name, e.__str__()))
                    logger.error(e_msg)

            for share in Share.objects.all():
                try:
                    if (share.pqgroup == settings.MODEL_DEFS['pqgroup']):
                        share.pqgroup = qgroup_create(share.pool)
                        share.save()
                    if (not is_share_mounted(share.name)):
                        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
                        mount_share(share, mnt_pt)
                except Exception, e:
                    e_msg = ('Exception while mounting a share(%s) during '
                             'bootstrap: %s' % (share.name, e.__str__()))
                    logger.error(e_msg)
Example #27
def btrfs_add_pool(pool):
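    # Match the requested member disks against the current scan, create the
    # pool on those devices, then record its mounted size and btrfs uuid.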
    disks = btrfs_disk_scan()
    disks_pool = []
    for disk in pool["disks"]:
        for disk_d in disks:
            if disk == disk_d["name"]:
                disks_pool.append(disk_d)
    dnames = [d["name"] for d in disks_pool]
    pool["disks"] = disks_pool
    add_pool(pool, dnames)
    pool["size"] = pool_usage(mount_root(pool))[0]
    pool["uuid"] = btrfs_uuid(dnames[0])
    return pool
Example #28
 def _balance_start(self, pool, force=False, convert=None):
     mnt_pt = mount_root(pool)
     start_balance.async(mnt_pt, force=force, convert=convert)
     tid = 0
     count = 0
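     # Poll the stored task records for up to 5 seconds (25 x 0.2s) for the
     # entry whose first argument matches our mount point and use its uuid.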
     while (tid == 0 and count < 25):
         for t in Task.objects.all():
             if (pickle.loads(t.args)[0] == mnt_pt):
                 tid = t.uuid
         time.sleep(0.2)
         count += 1
     logger.debug('balance tid = %s' % tid)
     return tid
Example #29
 def _balance_start(self, pool, force=False, convert=None):
     mnt_pt = mount_root(pool)
     start_balance.async(mnt_pt, force=force, convert=convert)
     tid = 0
     count = 0
     while (tid == 0 and count < 25):
         for t in Task.objects.all():
             if (pickle.loads(t.args)[0] == mnt_pt):
                 tid = t.uuid
         time.sleep(0.2)
         count += 1
     logger.debug('balance tid = %s' % tid)
     return tid
Example #30
def btrfs_add_pool(pool):
	disks = btrfs_disk_scan()
	disks_pool = []
	for disk in pool["disks"]:
		for disk_d in disks:
			if disk == disk_d["name"]:
				disks_pool.append(disk_d)
	dnames = [d["name"] for d in disks_pool]
	pool["disks"] = disks_pool
	add_pool(pool,dnames)
	pool["size"] = pool_usage(mount_root(pool))[0]
	pool["uuid"] = btrfs_uuid(dnames[0])
	return pool
Example #31
 def _update_disk_state():
     disks = scan_disks(settings.MIN_DISK_SIZE)
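     # Sync db Disk entries with the attached drives reported by scan_disks:
     # match by name first, then by serial number, else create a new entry.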
     for d in disks:
         dob = None
         if (Disk.objects.filter(name=d.name).exists()):
             dob = Disk.objects.get(name=d.name)
             dob.serial = d.serial
         elif (Disk.objects.filter(serial=d.serial).exists()):
             dob = Disk.objects.get(serial=d.serial)
             dob.name = d.name
         else:
             dob = Disk(name=d.name, size=d.size, parted=d.parted,
                        btrfs_uuid=d.btrfs_uuid, model=d.model,
                        serial=d.serial, transport=d.transport,
                        vendor=d.vendor)
         dob.size = d.size
         dob.parted = d.parted
         dob.offline = False
         dob.model = d.model
         dob.transport = d.transport
         dob.vendor = d.vendor
         dob.btrfs_uuid = d.btrfs_uuid
         if (d.fstype is not None and d.fstype != 'btrfs'):
             dob.btrfs_uuid = None
             dob.parted = True
         if (Pool.objects.filter(name=d.label).exists()):
             dob.pool = Pool.objects.get(name=d.label)
         else:
             dob.pool = None
         if (dob.pool is None and d.root is True):
             p = Pool(name=settings.ROOT_POOL, raid='single')
             p.disk_set.add(dob)
             p.save()
             dob.pool = p
             dob.save()
             p.size = pool_usage(mount_root(p))[0]
             enable_quota(p)
             p.uuid = btrfs_uuid(dob.name)
             p.save()
         dob.save()
     for do in Disk.objects.all():
         if (do.name not in [d.name for d in disks]):
             do.offline = True
         else:
             try:
                 # for non ata/sata drives
                 do.smart_available, do.smart_enabled = smart.available(do.name)
             except Exception, e:
                 logger.exception(e)
                 do.smart_available = do.smart_enabled = False
         do.save()
Example #32
    def post(self, request, command):
        if (command == 'bootstrap'):

            for pool in Pool.objects.all():
                try:
                    mount_root(pool)
                except Exception, e:
                    e_msg = ('Exception while mounting a pool(%s) during '
                             'bootstrap: %s' % (pool.name, e.__str__()))
                    logger.error(e_msg)

            for share in Share.objects.all():
                try:
                    if (share.pqgroup == settings.MODEL_DEFS['pqgroup']):
                        share.pqgroup = qgroup_create(share.pool)
                        share.save()
                    if (not is_share_mounted(share.name)):
                        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
                        mount_share(share, mnt_pt)
                except Exception, e:
                    e_msg = ('Exception while mounting a share(%s) during '
                             'bootstrap: %s' % (share.name, e.__str__()))
                    logger.error(e_msg)
Example #33
 def _balance_start(self, pool, force=False, convert=None):
     mnt_pt = mount_root(pool)
     if convert is None and pool.raid == "single":
         # Btrfs balance without convert filters will convert dup level
         # metadata on a single level data pool to raid1 on multi disk
         # pools. Avoid by explicit convert in this instance.
         logger.info(
             "Preserve single data, dup metadata by explicit convert.")
         convert = "single"
     task_result_handle = start_balance(mnt_pt,
                                        force=force,
                                        convert=convert)
     tid = task_result_handle.id
     logger.debug("balance tid = ({}).".format(tid))
     return tid
Example #34
 def _balance_start(self, pool, force=False, convert=None):
     mnt_pt = mount_root(pool)
     if convert is None and pool.raid == "single":
         # Btrfs balance without convert filters will convert dup level
         # metadata on a single level data pool to raid1 on multi disk
         # pools. Avoid by explicit convert in this instance.
         logger.info(
             "Preserve single data, dup metadata by explicit convert.")
         convert = "single"
     start_balance.async(mnt_pt, force=force, convert=convert)
     tid = 0
     count = 0
     while tid == 0 and count < 25:
         for t in Task.objects.all():
             if pickle.loads(t.args)[0] == mnt_pt:
                 tid = t.uuid
         time.sleep(0.2)  # 200 milliseconds
         count += 1
     logger.debug("balance tid = ({}).".format(tid))
     return tid
Example #35
 def _balance_start(self, pool, force=False, convert=None):
     mnt_pt = mount_root(pool)
     if convert is None and pool.raid == 'single':
         # Btrfs balance without convert filters will convert dup level
         # metadata on a single level data pool to raid1 on multi disk
         # pools. Avoid by explicit convert in this instance.
         logger.info('Preserve single data, dup metadata by explicit '
                     'convert.')
         convert = 'single'
     start_balance.async(mnt_pt, force=force, convert=convert)
     tid = 0
     count = 0
     while (tid == 0 and count < 25):
         for t in Task.objects.all():
             if (pickle.loads(t.args)[0] == mnt_pt):
                 tid = t.uuid
         time.sleep(0.2)
         count += 1
     logger.debug('balance tid = ({}).'.format(tid))
     return tid
Example #36
def main():
    for p in Pool.objects.all():
        try:
            print("Processing pool(%s)" % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, "subvol", "list", mnt_pt])
            subvol_ids = []
            for l in o:
                if re.match("ID ", l) is not None:
                    subvol_ids.append(l.split()[1])

            o, e, rc = run_command([BTRFS, "qgroup", "show", mnt_pt],
                                   throw=False)
            if rc != 0:
                print("Quotas not enabled on pool(%s). Skipping it." % p.name)
                continue

            qgroup_ids = []
            for l in o:
                if re.match("0/", l) is not None:
                    q = l.split()[0].split("/")[1]
                    if q == "5":
                        continue
                    qgroup_ids.append(l.split()[0].split("/")[1])

            for q in qgroup_ids:
                if q not in subvol_ids:
                    print("qgroup %s not in use. deleting" % q)
                    run_command(
                        [BTRFS, "qgroup", "destroy",
                         "0/%s" % q, mnt_pt])
                else:
                    print("qgroup %s is in use. Moving on." % q)
            print("Finished processing pool(%s)" % p.name)
        except Exception as e:
            print("Exception while qgroup-cleanup of Pool(%s): %s" %
                  (p.name, e.__str__()))
Example #37
    def post(self, request, uuid):
        """
        import a pool with given uuid
        """
        disks = Disk.objects.filter(btrfs_uuid=uuid)

        if (not btrfs_importable(disks[0].name)):
            e_msg = ('btrfs check failed on device: %s Cannot automatically '
                     'import the pool with uuid: %s' % (disks[0].name, uuid))
            handle_exception(Exception(e_msg), request)

        #get name of the pool
        pname = btrfs_label(uuid)

        #mount the pool
        mount_root(pname, '/dev/%s' % disks[0].name)
        pool_mnt_pt = '%s/%s' % (settings.MNT_PT, pname)

        #get raid level
        raid_level = btrfs_raid_level(pname)
        if (raid_level is None):
            umount_root(pool_mnt_pt)
            e_msg = ('Problem while probing for the raid level of the pool.'
                     'Cannot automatically import the pool with uuid: %s' %
                     uuid)
            handle_exception(Exception(e_msg), request)

        #check for shares in the pool
        subvols, e, rc = subvol_list_helper(pool_mnt_pt)
        snap_list = snapshot_list(pool_mnt_pt)
        share_map = {}
        for s in subvols:
            s_fields = s.split()
            if (s_fields[-1] not in snap_list):
                share_map[s_fields[-1]] = s_fields[1]

        entries = os.listdir(pool_mnt_pt)
        e_msg_prefix = ('Only btrfs filesystem with nothing but subvolumes in '
                        'it can be imported.')
        for e in entries:
            if (os.path.isfile('%s/%s' % (pool_mnt_pt, e))):
                e_msg = ('%s Unexpected file %s found. Due to this reason, '
                         'pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)
            elif (e not in share_map):
                e_msg = ('%s Unexpected directory %s found. Due to this '
                         'reason, pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)

        #add pool model
        pool_size = self._pool_size(disks, raid_level)
        p = Pool(name=pname, raid=raid_level, size=pool_size, uuid=uuid)
        p.save()

        #import shares
        for s in share_map.keys():
            so = Share(pool=p,
                       qgroup='0/%s' % share_map[s],
                       name=s,
                       size=qgroup_size,
                       subvol_name=s,
                       replica=False)
            so.save()

            #import snapshots?
            for snap in snap_list:
                snap_fields = snap.split('_')
                snap_name = snap_fields[-1]
                sname = '_'.join(snap_fields[0:-1])
                if (sname == s):
                    snapo = Snapshot(share=so,
                                     name=snap_name,
                                     real_name=snap,
                                     qgroup=qgroup_id)
                    snapo.save()
Example #38
        if (command == 'disable-auto-update'):
            try:
                auto_update(enable=False)
                return Response({'enabled': False, })
            except Exception, e:
                msg = ('Failed to disable auto update due to this exception:  '
                       '%s' % e.__str__())
                handle_exception(Exception(msg), request)

        if (command == 'refresh-pool-state'):
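            # Re-read each pool's label, raid level, and size from its first
            # member disk; pools with no member disks are deleted.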
            for p in Pool.objects.all():
                fd = p.disk_set.first()
                if (fd is None):
                    p.delete()
                mount_root(p)
                pool_info = get_pool_info(fd.name)
                p.name = pool_info['label']
                p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
                p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
                p.save()
            return Response()

        if (command == 'refresh-share-state'):
            for p in Pool.objects.all():
                import_shares(p, request)
            return Response()

        if (command == 'refresh-snapshot-state'):
            for share in Share.objects.all():
                import_snapshots(share)
Example #39
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [self._validate_disk(d, request) for d in
                     request.data.get('disks')]
            pname = request.data['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with an alphanumeric(a-z0-9) '
                         'character and can be followed by any of the '
                         'following characters: letter(a-z), digits(0-9), '
                         'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (len(pname) > 255):
                e_msg = ('Pool name must be less than 255 characters')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool(%s) already exists. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            if (Share.objects.filter(name=pname).exists()):
                e_msg = ('A Share with this name(%s) exists. Pool and Share names '
                         'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.'
                             % d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: {}'.format(self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid 1 disk check
            if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname, raid=raid_level, compression=compression,
                     mnt_options=mnt_options)
            p.disk_set.add(*disks)
            p.save()
            # added for loop to save disks
            # appears p.disk_set.add(*disks) was not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            return Response(PoolInfoSerializer(p).data)
Example #40
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is only known external unique entry, scan_disks
        # make this so in the case of empty or repeat entries by providing
        # fake serial numbers which are in turn flagged via WebUI as unreliable.
        # 1) scrub all device names with unique but nonsense uuid4
        # 2) mark all offline disks as such via db flag
        # 3) mark all offline disks smart available and enabled flags as False
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (
                    re.match('fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are place holders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # if these 2 strings are found then ignore them as we then
                # overwrite with our current finding and in the new json format.
                # I.e. non None could also be a legacy entry so follow overwrite
                # path when legacy entry found by treating as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now )so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via it's label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # This is for backwards compatibility: root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label, and
            # the attached disk is our root disk (flagged by scan_disks):
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
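The mdraid role bookkeeping earlier in this example reduces to a small pure function over the stored role string. Below is a minimal sketch of that update logic, lifted out of the Django model purely for illustration; the function name and the standalone usage are assumptions, not part of the original code.

import json


def updated_role(existing_role, fstype):
    """Return the new Disk.role value given a freshly scanned fstype.

    Mirrors the logic above: legacy plain-string roles
    ('isw_raid_member' / 'linux_raid_member') are treated like None and
    overwritten, while json roles are merged or pruned.
    """
    legacy = ('isw_raid_member', 'linux_raid_member')
    is_json_role = existing_role is not None and existing_role not in legacy
    if fstype in legacy:
        # Disk is an mdraid member: add or refresh the 'mdraid' entry.
        if is_json_role:
            known_roles = json.loads(existing_role)
            known_roles['mdraid'] = str(fstype)
            return json.dumps(known_roles)
        return json.dumps({'mdraid': str(fstype)})
    # Disk is not an mdraid member: drop any stale 'mdraid' entry.
    if is_json_role:
        known_roles = json.loads(existing_role)
        known_roles.pop('mdraid', None)
        return json.dumps(known_roles) if known_roles else None
    return None


# A legacy string role is simply replaced by the json form:
# updated_role('linux_raid_member', 'linux_raid_member')
# -> '{"mdraid": "linux_raid_member"}'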
Example #41
 def _update_disk_state():
     """
     A db atomic method to update the database of attached disks / drives.
     Works only on device serial numbers for drive identification.
     Calls scan_disks to establish the current connected drives info.
     Initially removes duplicate by serial number db entries to deal
     with legacy db states and obfuscates all previous device names as they
     are transient. The drive database is then updated with the attached
     disks info and previously known drives no longer found attached are
     marked as offline. All offline drives have their SMART availability and
     activation status removed and all attached drives have their SMART
     availability assessed and activated if available.
     :return: serialized models of attached and missing disks via serial num
     """
     # Acquire a list (namedtuple collection) of attached drives > min size
     disks = scan_disks(settings.MIN_DISK_SIZE)
     serial_numbers_seen = []
     # Make sane our db entries in view of what we know we have attached.
     # The device serial number is the only known externally unique entry;
     # scan_disks ensures this in the case of empty or repeat entries by
     # providing fake serial numbers, which are flagged via the WebUI as unreliable.
     # 1) scrub all device names with unique but nonsense uuid4
     # 2) mark all offline disks as such via db flag
     # 3) mark all offline disks' smart available and enabled flags as False
     logger.info('update_disk_state() Called')
     for do in Disk.objects.all():
         # Replace all device names with a unique placeholder on each scan
         # N.B. do not optimize by re-using uuid index as this could lead
         # to a non refreshed webui acting upon an entry that is different
         # from that shown to the user.
         do.name = str(uuid.uuid4()).replace('-', '')  # 32 chars long
         # Delete duplicate or fake by serial number db disk entries.
         # It makes no sense to save fake serial number drives between scans
         # as on each scan the serial number is re-generated anyway.
         if (do.serial in serial_numbers_seen) or (len(do.serial) == 48):
             logger.info('Deleting duplicate or fake (by serial) Disk db '
                         'entry. Serial = %s' % do.serial)
             do.delete()  # django >=1.9 returns a dict of deleted items.
             # Continue onto next db disk object as nothing more to process.
             continue
         # first encounter of this serial in the db so stash it for reference
         serial_numbers_seen.append(deepcopy(do.serial))
         # Look for devices (by serial number) that are in the db but not in
         # our disk scan, ie offline / missing.
         if (do.serial not in [d.serial for d in disks]):
             # update the db entry as offline
             do.offline = True
             # disable S.M.A.R.T available and enabled flags.
             do.smart_available = do.smart_enabled = False
         do.save()  # make sure all updates are flushed to db
     # Our db now has no device name info as all dev names are placeholders.
     # Iterate over attached drives to update the db's knowledge of them.
     # Kernel dev names are unique so safe to overwrite our db unique name.
     for d in disks:
         # start with an empty disk object
         dob = None
         # If the db has an entry with this disk's serial number then
         # use this db entry and update the device name from our recent scan.
         if (Disk.objects.filter(serial=d.serial).exists()):
             dob = Disk.objects.get(serial=d.serial)
             dob.name = d.name
         else:
             # We have an assumed new disk entry as no serial match in db.
             # Build a new entry for this disk.
             dob = Disk(name=d.name, serial=d.serial)
         # Update the db disk object (existing or new) with our scanned info
         dob.size = d.size
         dob.parted = d.parted
         dob.offline = False  # as we are iterating over attached devices
         dob.model = d.model
         dob.transport = d.transport
         dob.vendor = d.vendor
         dob.btrfs_uuid = d.btrfs_uuid
         # If attached disk has an fs and it isn't btrfs
         if (d.fstype is not None and d.fstype != 'btrfs'):
             dob.btrfs_uuid = None
             dob.parted = True
         # If our existing Pool db knows of this disk's pool via its label:
         if (Pool.objects.filter(name=d.label).exists()):
             # update the disk db object's pool field accordingly.
             dob.pool = Pool.objects.get(name=d.label)
         else:  # this disk is not known to exist in any pool via its label
             dob.pool = None
         # If no pool has yet been found with this disk's label, and
         # the attached disk is our root disk (flagged by scan_disks):
         if (dob.pool is None and d.root is True):
             # setup our special root disk db entry in Pool
             p = Pool(name=settings.ROOT_POOL, raid='single')
             p.disk_set.add(dob)
             p.save()
             # update disk db object to reflect special root pool status
             dob.pool = p
             dob.save()
             p.size = pool_usage(mount_root(p))[0]
             enable_quota(p)
             p.uuid = btrfs_uuid(dob.name)
             p.save()
         # save our updated db disk object
         dob.save()
     # Update online db entries with S.M.A.R.T availability and status.
     for do in Disk.objects.all():
         # find all the not offline db entries
         if (not do.offline):
             # We have an attached disk db entry
             if re.match('vd', do.name):
                 # Virtio disks (named vd*) have no smart capability.
                 # avoids cluttering logs with exceptions on these devices.
                 do.smart_available = do.smart_enabled = False
                 continue
             # try to establish smart availability and status and update db
             try:
                 # for non ata/sata drives
                 do.smart_available, do.smart_enabled = smart.available(
                     do.name)
             except Exception as e:
                 logger.exception(e)
                 do.smart_available = do.smart_enabled = False
         do.save()
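The offline detection in this example is essentially a set difference on serial numbers: any db entry whose serial is absent from the fresh scan gets flagged offline. A minimal standalone sketch of that reconciliation, using plain dicts in place of the Django model (an illustrative assumption only):

def mark_missing_offline(db_disks, scanned_disks):
    """Flag db entries whose serial no longer appears in the scan.

    db_disks: list of dicts, each with at least a 'serial' key.
    scanned_disks: iterable of dicts with a 'serial' key, representing the
    currently attached devices.
    """
    attached_serials = {d['serial'] for d in scanned_disks}
    for entry in db_disks:
        if entry['serial'] not in attached_serials:
            entry['offline'] = True
            entry['smart_available'] = entry['smart_enabled'] = False
    return db_disks


# mark_missing_offline([{'serial': 'A'}, {'serial': 'B'}], [{'serial': 'A'}])
# flags only the 'B' entry as offline.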
Example #42
    def post(self, request, uuid):
        """
        import a pool with given uuid
        """
        disks = Disk.objects.filter(btrfs_uuid=uuid)

        if (not btrfs_importable(disks[0].name)):
            e_msg = ('btrfs check failed on device: %s Cannot automatically '
                     'import the pool with uuid: %s' % (disks[0].name, uuid))
            handle_exception(Exception(e_msg), request)


        # get name of the pool
        pname = btrfs_label(uuid)

        # mount the pool
        mount_root(pname, '/dev/%s' % disks[0].name)
        pool_mnt_pt = '%s/%s' % (settings.MNT_PT, pname)

        # get raid level
        raid_level = btrfs_raid_level(pname)
        if (raid_level is None):
            umount_root(pool_mnt_pt)
            e_msg = ('Problem while probing for the raid level of the pool. '
                     'Cannot automatically import the pool with uuid: %s' %
                     uuid)
            handle_exception(Exception(e_msg), request)

        # check for shares in the pool
        subvols, e, rc = subvol_list_helper(pool_mnt_pt)
        snap_list = snapshot_list(pool_mnt_pt)
        share_map = {}
        for s in subvols:
            s_fields = s.split()
            if (s_fields[-1] not in snap_list):
                share_map[s_fields[-1]] = s_fields[1]

        entries = os.listdir(pool_mnt_pt)
        e_msg_prefix = ('Only btrfs filesystem with nothing but subvolumes in '
                        'it can be imported.')
        for e in entries:
            if (os.path.isfile('%s/%s' % (pool_mnt_pt, e))):
                e_msg = ('%s Unexpected file %s found. Due to this reason, '
                         'pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)
            elif (e not in share_map):
                e_msg = ('%s Unexpected directory %s found. Due to this '
                         'reason, pool with uuid: %s cannot be imported' %
                         (e_msg_prefix, e, uuid))
                handle_exception(Exception(e_msg), request)

        # add pool model
        pool_size = self._pool_size(disks, raid_level)
        p = Pool(name=pname, raid=raid_level, size=pool_size, uuid=uuid)
        p.save()

        # import shares
        for s in share_map.keys():
            so = Share(pool=p, qgroup='0/%s' % share_map[s], name=s,
                       size=qgroup_size, subvol_name=s, replica=False)
            so.save()

            # import snapshots?
            for snap in snap_list:
                snap_fields = snap.split('_')
                snap_name = snap_fields[-1]
                sname = '_'.join(snap_fields[0:-1])
                if (sname == s):
                    snapo = Snapshot(share=so, name=snap_name,
                                     real_name=snap, qgroup=qgroup_id)
                    snapo.save()
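The snapshot matching above relies on a naming convention in which a snapshot's real name is the owning share's name and the snapshot name joined by an underscore, with only the last field taken as the snapshot name. A minimal sketch of that split, under the same assumed convention:

def split_snap_name(real_name):
    """Split 'sharename_snapname' into (sharename, snapname).

    Underscores inside the share name survive because only the last
    '_'-separated field is treated as the snapshot name, mirroring the
    loop above.
    """
    fields = real_name.split('_')
    return '_'.join(fields[:-1]), fields[-1]


# split_snap_name('my_share_daily1') -> ('my_share', 'daily1')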
Example #43
            try:
                auto_update(enable=False)
                return Response({
                    'enabled': False,
                })
            except Exception as e:
                msg = ('Failed to disable auto update due to this exception:  '
                       '%s' % e.__str__())
                handle_exception(Exception(msg), request)

        if (command == 'refresh-pool-state'):
            for p in Pool.objects.all():
                fd = p.disk_set.first()
                if (fd is None):
                    p.delete()
                    continue
                mount_root(p)
                pool_info = get_pool_info(fd.name)
                p.name = pool_info['label']
                p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
                p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
                p.save()
            return Response()

        if (command == 'refresh-share-state'):
            for p in Pool.objects.all():
                import_shares(p, request)
            return Response()

        if (command == 'refresh-snapshot-state'):
            for share in Share.objects.all():
                import_snapshots(share)
Example #44
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [self._validate_disk(d, request) for d in request.data.get("disks")]
            pname = request.data["pname"]
            if re.match("%s$" % settings.POOL_REGEX, pname) is None:
                e_msg = (
                    "Pool name must start with a alphanumeric(a-z0-9) "
                    "character and can be followed by any of the "
                    "following characters: letter(a-z), digits(0-9), "
                    "hyphen(-), underscore(_) or a period(.)."
                )
                handle_exception(Exception(e_msg), request)

            if Pool.objects.filter(name=pname).exists():
                e_msg = "Pool(%s) already exists. Choose a different name" % pname
                handle_exception(Exception(e_msg), request)

            if Share.objects.filter(name=pname).exists():
                e_msg = (
                    "A Share with this name(%s) exists. Pool and Share names "
                    "must be distinct. Choose a different name" % pname
                )
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if d.btrfs_uuid is not None:
                    e_msg = (
                        "Another BTRFS filesystem exists on this " "disk(%s). Erase the disk and try again." % d.name
                    )
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data["raid_level"]
            if raid_level not in self.RAID_LEVELS:
                e_msg = "Unsupported raid level. use one of: {}".format(self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid1 disk check
            if raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1:
                e_msg = "At least two disks are required for the raid level: " "%s" % raid_level
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[3]:
                if len(disks) < 4:
                    e_msg = "A minimum of Four drives are required for the " "raid level: %s" % raid_level
                    handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[4] and len(disks) < 2:
                e_msg = "Two or more disks are required for the raid " "level: %s" % raid_level
                handle_exception(Exception(e_msg), request)
            if raid_level == self.RAID_LEVELS[5] and len(disks) < 3:
                e_msg = "Three or more disks are required for the raid " "level: %s" % raid_level
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname, raid=raid_level, compression=compression, mnt_options=mnt_options)
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p, dnames[0]))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.disk_set.add(*disks)
            p.save()
            # added for loop to save disks
            # appears p.disk_set.add(*disks) was not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            return Response(PoolInfoSerializer(p).data)
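The chain of per-level disk-count checks above can also be written as a table lookup. A minimal sketch, assuming the conventional ordering RAID_LEVELS = ('single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'); that tuple is not shown in this snippet, so the mapping below is an inference from the index-based checks:

# Minimum attached-disk counts implied by the checks above.
MIN_DISKS = {
    'single': 1,
    'raid0': 2,
    'raid1': 2,
    'raid10': 4,
    'raid5': 2,
    'raid6': 3,
}


def validate_disk_count(raid_level, disks):
    """Raise ValueError when too few disks are supplied for raid_level."""
    needed = MIN_DISKS.get(raid_level)
    if needed is None:
        raise ValueError('Unsupported raid level: %s' % raid_level)
    if len(disks) < needed:
        raise ValueError('%d or more disks are required for the raid '
                         'level: %s' % (needed, raid_level))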
Example #46
 def _create_root_pool(self, d):
     p = Pool(name=settings.ROOT_POOL, raid='single')
     p.size = pool_usage(mount_root(p, d.name))[0]
     enable_quota(p, '/dev/%s' % d.name)
     p.uuid = btrfs_uuid(d.name)
     return p
Example #47
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [self._validate_disk(d, request) for d in
                     request.DATA.get('disks')]
            pname = request.DATA['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = ('Pool name must start with a letter(a-z) and can'
                         ' be followed by any of the following characters: '
                         'letter(a-z), digits(0-9), hyphen(-), underscore'
                         '(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool with name: %s already exists.' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.'
                             % d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.DATA['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: %s' %
                         self.RAID_LEVELS)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[1] and len(disks) == 1):
                e_msg = ('More than one disk is required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[2] and len(disks) < 2):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
                elif (len(disks) % 2 != 0):
                    e_msg = ('Even number of drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 4):
                e_msg = ('Four or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname, raid=raid_level, compression=compression,
                     mnt_options=mnt_options)
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p, dnames[0]))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.disk_set.add(*disks)
            p.save()
            return Response(PoolInfoSerializer(p).data)
Example #48
    def post(self, request):
        """
        input is a list of disks, raid_level and name of the pool.
        """
        with self._handle_exception(request):
            disks = [
                self._validate_disk(d, request)
                for d in request.data.get('disks')
            ]
            pname = request.data['pname']
            if (re.match('%s$' % settings.POOL_REGEX, pname) is None):
                e_msg = (
                    'Invalid characters in Pool name. Following '
                    'characters are allowed: letter(a-z or A-Z), digit(0-9), '
                    'hyphen(-), underscore(_) or a period(.).')
                handle_exception(Exception(e_msg), request)

            if (len(pname) > 255):
                e_msg = ('Pool name must be 255 characters or fewer')
                handle_exception(Exception(e_msg), request)

            if (Pool.objects.filter(name=pname).exists()):
                e_msg = ('Pool(%s) already exists. Choose a different name' %
                         pname)
                handle_exception(Exception(e_msg), request)

            if (Share.objects.filter(name=pname).exists()):
                e_msg = (
                    'A Share with this name(%s) exists. Pool and Share names '
                    'must be distinct. Choose a different name' % pname)
                handle_exception(Exception(e_msg), request)

            for d in disks:
                if (d.btrfs_uuid is not None):
                    e_msg = ('Another BTRFS filesystem exists on this '
                             'disk(%s). Erase the disk and try again.' %
                             d.name)
                    handle_exception(Exception(e_msg), request)

            raid_level = request.data['raid_level']
            if (raid_level not in self.RAID_LEVELS):
                e_msg = ('Unsupported raid level. use one of: {}'.format(
                    self.RAID_LEVELS))
                handle_exception(Exception(e_msg), request)
            # consolidated raid0 & raid1 disk check
            if (raid_level in self.RAID_LEVELS[1:3] and len(disks) <= 1):
                e_msg = ('At least two disks are required for the raid level: '
                         '%s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[3]):
                if (len(disks) < 4):
                    e_msg = ('A minimum of Four drives are required for the '
                             'raid level: %s' % raid_level)
                    handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[4] and len(disks) < 2):
                e_msg = ('Two or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)
            if (raid_level == self.RAID_LEVELS[5] and len(disks) < 3):
                e_msg = ('Three or more disks are required for the raid '
                         'level: %s' % raid_level)
                handle_exception(Exception(e_msg), request)

            compression = self._validate_compression(request)
            mnt_options = self._validate_mnt_options(request)
            dnames = [d.name for d in disks]
            p = Pool(name=pname,
                     raid=raid_level,
                     compression=compression,
                     mnt_options=mnt_options)
            p.disk_set.add(*disks)
            p.save()
            # added for loop to save disks
            # appears p.disk_set.add(*disks) was not saving disks in test environment
            for d in disks:
                d.pool = p
                d.save()
            add_pool(p, dnames)
            p.size = pool_usage(mount_root(p))[0]
            p.uuid = btrfs_uuid(dnames[0])
            p.save()
            return Response(PoolInfoSerializer(p).data)
Example #49
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes duplicate by serial number db entries to deal
        with legacy db states and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # The device serial number is the only known externally unique entry;
        # scan_disks ensures this in the case of empty or repeat entries by
        # providing fake serial numbers, which are flagged via the WebUI as unreliable.
        # 1) scrub all device names with unique but nonsense uuid4
        # 2) mark all offline disks as such via db flag
        # 3) mark all offline disks' smart available and enabled flags as False
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (re.match(
                    'fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are placeholders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient but just scanned so current sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # if these 2 strings are found then ignore them as we then
                # overwrite with our current finding and in the new json format.
                # I.e. non None could also be a legacy entry so follow overwrite
                # path when legacy entry found by treating as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now) so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via its label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # This is for backwards compatibility: root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label, and
            # the attached disk is our root disk (flagged by scan_disks):
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception as e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
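The SMART-availability gating above boils down to two cheap prefix checks on the by-id device name and the serial number. A minimal sketch of that predicate on its own, assuming by-id style names as stored in Disk.name:

import re


def smart_probe_worthwhile(name, serial):
    """Return False for devices that should not be probed via smartmontools.

    Mirrors the checks above: fake serials (assigned by scan_disks) and
    virtio/md/sdcard/nvme by-id names are skipped to avoid log noise.
    """
    if re.match('fake-serial-', serial) is not None:
        return False
    if re.match('virtio-|md-|mmc-|nvme-', name) is not None:
        return False
    return True


# smart_probe_worthwhile('virtio-abc123', '1234') -> False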