def _btrfs_disk_import(self, dname, request):
    """Import an existing btrfs pool (and its shares/snapshots) from dname.

    Creates a db Pool record for the pool found on the given device, claims
    all of the pool's member disks, mounts the pool, then imports shares and
    their snapshots.

    :param dname: device name to probe for an existing btrfs pool.
    :param request: request object, passed through to import/exception
        helpers.
    :return: Response with serialized info for the validated disk.
    """
    try:
        disk = self._validate_disk(dname, request)
        p_info = get_pool_info(dname)
        # get some options from saved config?
        po = Pool(name=p_info['label'], raid="unknown")
        # need to save it so disk objects get updated properly in the for
        # loop below.
        po.save()
        for d in p_info['disks']:
            do = Disk.objects.get(name=d)
            do.pool = po
            do.parted = False
            do.save()
        mount_root(po)
        po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
        po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
        po.save()
        enable_quota(po)
        import_shares(po, request)
        for share in Share.objects.filter(pool=po):
            import_snapshots(share)
        return Response(DiskInfoSerializer(disk).data)
    # Fix: use the Python 2.6+/3.x 'as' form instead of the removed
    # 'except Exception, e' syntax, consistent with sibling variants.
    except Exception as e:
        e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                 % (dname, e.__str__()))
        handle_exception(Exception(e_msg), request)
def _btrfs_disk_import(self, dname, request):
    """Import an existing btrfs pool (and its shares/snapshots) from dname.

    Creates a db Pool record for the pool found on the given device, claims
    all of the pool's member disks, mounts the pool, then imports shares and
    their snapshots.

    :param dname: device name to probe for an existing btrfs pool.
    :param request: request object, passed through to import/exception
        helpers.
    :return: Response with serialized info for the validated disk.
    """
    try:
        disk = self._validate_disk(dname, request)
        p_info = get_pool_info(dname)
        # get some options from saved config?
        po = Pool(name=p_info['label'], raid="unknown")
        # need to save it so disk objects get updated properly in the for
        # loop below.
        po.save()
        for d in p_info['disks']:
            do = Disk.objects.get(name=d)
            do.pool = po
            do.parted = False
            do.save()
        mount_root(po)
        po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
        po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
        po.save()
        enable_quota(po)
        import_shares(po, request)
        for share in Share.objects.filter(pool=po):
            import_snapshots(share)
        return Response(DiskInfoSerializer(disk).data)
    # Fix: use the Python 2.6+/3.x 'as' form instead of the removed
    # 'except Exception, e' syntax, consistent with sibling variants.
    except Exception as e:
        e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                 % (dname, e.__str__()))
        handle_exception(Exception(e_msg), request)
def _refresh_pool_state():
    """Re-sync every db Pool record with its on-disk btrfs counterpart.

    Deletes pools that have no member disks at all, skips (with a log entry)
    pools whose members are all detached, and otherwise mounts each pool and
    refreshes its label, raid level and size from the live filesystem.
    Per-pool failures are logged and do not abort the overall sweep.
    """
    for pool in Pool.objects.all():
        # If our pool has no disks, detached included, then delete it.
        # We leave pools with all detached members in place intentionally.
        if pool.disk_set.count() == 0:
            pool.delete()
            continue
        # Log if no attached members are found, ie all devs are detached.
        if pool.disk_set.attached().count() == 0:
            logger.error('Skipping Pool (%s) mount as there '
                         'are no attached devices. Moving on.' % pool.name)
            continue
        try:
            mount_root(pool)
            lead_dev = pool.disk_set.attached().first()
            # Observe any redirect role by using target_name.
            pool_info = get_pool_info(lead_dev.target_name)
            pool.name = pool_info['label']
            mnt_pt = '%s%s' % (settings.MNT_PT, pool.name)
            pool.raid = pool_raid(mnt_pt)['data']
            pool.size = pool.usage_bound()
            pool.save()
        except Exception as e:
            logger.error('Exception while refreshing state for '
                         'Pool(%s). Moving on: %s' % (pool.name,
                                                      e.__str__()))
            logger.exception(e)
def _refresh_pool_state():
    """Re-sync every db Pool record with its on-disk btrfs counterpart.

    Deletes pools with no member disks; for the rest, mounts the pool and
    refreshes label, raid level and size from the live filesystem, honouring
    any "redirect" disk role when choosing the device to query. Failures on
    one pool are logged and do not stop the sweep.
    """
    for pool in Pool.objects.all():
        # A pool with no member disks is stale db state: drop it.
        if pool.disk_set.count() == 0:
            pool.delete()
            continue
        try:
            mount_root(pool)
            lead_disk = pool.disk_set.first()
            dev_name = lead_disk.name
            # if we are looking at a device with a redirect role then
            # redirect accordingly.
            if lead_disk.role is not None:
                role_map = json.loads(lead_disk.role)
                if 'redirect' in role_map:
                    # consider replacing None with lead_disk.name
                    dev_name = role_map.get('redirect', None)
            pool_info = get_pool_info(dev_name)
            pool.name = pool_info['label']
            mnt_pt = '%s%s' % (settings.MNT_PT, pool.name)
            pool.raid = pool_raid(mnt_pt)['data']
            pool.size = pool.usage_bound()
            pool.save()
        except Exception as e:
            logger.error('Exception while refreshing state for '
                         'Pool(%s). Moving on: %s' % (pool.name,
                                                      e.__str__()))
            logger.exception(e)
def _refresh_pool_state(self, pool):
    """Mount pool and refresh its db label, raid level and size.

    :param pool: Pool model instance to refresh.
    :return: the updated, saved pool object.
    """
    # Address the pool via its first db member disk; [0] raises IndexError
    # if the pool somehow has no members (caller's responsibility).
    member_name = Disk.objects.filter(pool=pool)[0].name
    mount_root(pool, member_name)
    pool_info = get_pool_info(member_name)
    pool.name = pool_info['label']
    mnt_pt = '%s%s' % (settings.MNT_PT, pool.name)
    pool.raid = pool_raid(mnt_pt)['data']
    pool.size = pool_usage(mnt_pt)[0]
    pool.save()
    return pool
def _refresh_pool_state(self, pool):
    """Refresh db info for pool, or delete it if it has no member disks.

    :param pool: Pool model instance to refresh.
    :return: the updated pool, or None (the delete() result) when the pool
        had no member disks and was removed.
    """
    first_disk = pool.disk_set.first()
    if first_disk is None:
        # No members left: the pool record is stale; remove it.
        return pool.delete()
    mount_root(pool, first_disk.name)
    pool_info = get_pool_info(first_disk.name)
    pool.name = pool_info["label"]
    mount_point = "%s%s" % (settings.MNT_PT, pool.name)
    pool.raid = pool_raid(mount_point)["data"]
    pool.size = pool_usage(mount_point)[0]
    pool.save()
    return pool
def _refresh_pool_state(self, pool):
    """Refresh db info for pool, or delete it if it has no member disks.

    :param pool: Pool model instance to refresh.
    :return: the updated pool, or None (the delete() result) when the pool
        had no member disks and was removed.
    """
    member = pool.disk_set.first()
    # An empty disk_set means this pool no longer exists for us: drop it.
    if member is None:
        return pool.delete()
    mount_root(pool, member.name)
    info = get_pool_info(member.name)
    pool.name = info['label']
    target = '%s%s' % (settings.MNT_PT, pool.name)
    pool.raid = pool_raid(target)['data']
    pool.size = pool_usage(target)[0]
    pool.save()
    return pool
def _refresh_pool_state():
    """Re-sync every db Pool record with its on-disk btrfs counterpart.

    Deletes pools with no member disks; for the rest, mounts the pool and
    refreshes label, raid level and size from the live filesystem.
    Per-pool failures are logged and do not abort the sweep.
    """
    for p in Pool.objects.all():
        # A pool with no member disks is stale db state: drop it.
        if (p.disk_set.count() == 0):
            p.delete()
            continue
        try:
            mount_root(p)
            fd = p.disk_set.first()
            pool_info = get_pool_info(fd.name)
            p.name = pool_info['label']
            p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
            p.size = p.usage_bound()
            p.save()
        # Fix: use the Python 2.6+/3.x 'as' form instead of the removed
        # 'except Exception, e' syntax, consistent with sibling variants.
        except Exception as e:
            logger.error('Exception while refreshing state for '
                         'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
            logger.exception(e)
def _btrfs_disk_import(self, dname, request):
    """Import an existing btrfs pool (and its shares/snapshots) from dname.

    Resolves any disk redirect role before probing, creates a db Pool for
    the pool found, claims all member disks (recording partition redirect
    roles where needed), mounts the pool and imports shares and snapshots.

    :param dname: device name to probe for an existing btrfs pool.
    :param request: request object, passed through to import/exception
        helpers.
    :return: Response with serialized info for the validated disk.
    """
    try:
        disk = self._validate_disk(dname, request)
        disk_name = self._role_filter_disk_name(disk, request)
        p_info = get_pool_info(disk_name)
        # get some options from saved config?
        po = Pool(name=p_info['label'], raid="unknown")
        # need to save it so disk objects get updated properly in the for
        # loop below.
        po.save()
        for device in p_info['disks']:
            disk_name, isPartition = \
                self._reverse_role_filter_name(device, request)
            do = Disk.objects.get(name=disk_name)
            do.pool = po
            # update this disk's parted property
            do.parted = isPartition
            if isPartition:
                # ensure a redirect role to reach this partition; ie:
                # "redirect": "virtio-serial-3-part2"
                if do.role is not None:  # db default is null / None.
                    # Get our previous roles into a dictionary
                    roles = json.loads(do.role)
                    # update or add our "redirect" role with our part name
                    roles['redirect'] = '%s' % device
                    # convert back to json and store in disk object
                    do.role = json.dumps(roles)
                else:
                    # role=None so just add a json formatted redirect role.
                    # Fix: 'device' is already a device-name string here
                    # (see the roles['redirect'] branch above); the former
                    # 'device.name' raised AttributeError on str.
                    do.role = '{"redirect": "%s"}' % device
            do.save()
        mount_root(po)
        po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
        po.size = po.usage_bound()
        po.save()
        enable_quota(po)
        import_shares(po, request)
        for share in Share.objects.filter(pool=po):
            import_snapshots(share)
        return Response(DiskInfoSerializer(disk).data)
    except Exception as e:
        e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                 % (dname, e.__str__()))
        handle_exception(Exception(e_msg), request)
def _refresh_pool_state():
    """Re-sync every db Pool record with its on-disk btrfs counterpart.

    Deletes pools with no member disks; for the rest, mounts the pool and
    refreshes label, raid level and size from the live filesystem.
    Per-pool failures are logged and do not abort the sweep.
    """
    for p in Pool.objects.all():
        # A pool with no member disks is stale db state: drop it.
        if (p.disk_set.count() == 0):
            p.delete()
            continue
        try:
            mount_root(p)
            fd = p.disk_set.first()
            pool_info = get_pool_info(fd.name)
            p.name = pool_info['label']
            p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
            p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0]
            p.save()
        # Fix: use the Python 2.6+/3.x 'as' form instead of the removed
        # 'except Exception, e' syntax, consistent with sibling variants.
        except Exception as e:
            logger.error('Exception while refreshing state for '
                         'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
            logger.exception(e)
def _refresh_pool_state():
    """Re-sync every db Pool record with its on-disk btrfs counterpart.

    Deletes pools with no member disks at all, skips pools whose members are
    all detached or whose lead device lacks a by-id name, and otherwise
    mounts each pool and refreshes its label, raid level and size from the
    live filesystem. Per-pool failures are logged and do not abort the sweep.
    """
    for pool in Pool.objects.all():
        # If our pool has no disks, detached included, then delete it.
        # We leave pools with all detached members in place intentionally.
        if pool.disk_set.count() == 0:
            pool.delete()
            continue
        # Log if no attached members are found, ie all devs are detached.
        if pool.disk_set.attached().count() == 0:
            logger.error('Skipping Pool (%s) mount as there '
                         'are no attached devices. Moving on.' % pool.name)
            continue
        try:
            # Get and save what info we can prior to mount.
            lead_dev = pool.disk_set.attached().first()
            is_root_pool = (pool.role == 'root')
            # Observe any redirect role by using target_name.
            dev_path = get_device_path(lead_dev.target_name)
            byid_disk_name, is_byid = get_dev_byid_name(dev_path)
            if not is_byid:
                logger.error('Skipping pool ({}) mount as attached disk '
                             '({}) has no by-id name '
                             '(no serial # ?)'.format(pool.name,
                                                      lead_dev.target_name))
                continue
            pool_info = get_pool_info(lead_dev.target_name, is_root_pool)
            pool.name = pool_info['label']
            pool.save()
            mount_root(pool)
            mnt_pt = '%s%s' % (settings.MNT_PT, pool.name)
            pool.raid = pool_raid(mnt_pt)['data']
            pool.size = pool.usage_bound()
            # Consider using mount_status() parse to update root pool db on
            # active (fstab initiated) compression setting.
            pool.save()
        except Exception as e:
            logger.error('Exception while refreshing state for '
                         'Pool(%s). Moving on: %s' % (pool.name,
                                                      e.__str__()))
            logger.exception(e)
def _btrfs_disk_import(self, dname, request):
    """Import an existing btrfs pool from dname into the db.

    Creates a db Pool record for the pool found on the given device, claims
    the pool's member disks, mounts the pool and records its raid level and
    size.

    :param dname: device name to probe for an existing btrfs pool.
    :param request: request object, passed to validation/exception helpers.
    :return: Response with serialized info for the validated disk.
    """
    try:
        disk = self._validate_disk(dname, request)
        p_info = get_pool_info(dname)
        # get some options from saved config?
        po = Pool(name=p_info['label'], raid="unknown")
        # Fix: save the pool before pointing member disks at it, so the
        # foreign-key assignment below references a persisted row (as the
        # later variants of this method do).
        po.save()
        for d in p_info['disks']:
            do = Disk.objects.get(name=d)
            do.pool = po
            do.save()
        # NOTE(review): mounts via the last member disk iterated above;
        # raises NameError (caught below) if the pool reports no disks.
        mount_root(po, d)
        po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
        po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
        po.save()
        return Response(DiskInfoSerializer(disk).data)
    # Fix: use the Python 2.6+/3.x 'as' form instead of the removed
    # 'except Exception, e' syntax.
    except Exception as e:
        e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                 % (dname, e.__str__()))
        handle_exception(Exception(e_msg), request)
def _btrfs_disk_import(self, dname, request):
    """Import an existing btrfs pool from dname into the db.

    Creates a db Pool record for the pool found on the given device, claims
    the pool's member disks, mounts the pool and records its raid level and
    size.

    :param dname: device name to probe for an existing btrfs pool.
    :param request: request object, passed to validation/exception helpers.
    :return: Response with serialized info for the validated disk.
    """
    try:
        disk = self._validate_disk(dname, request)
        p_info = get_pool_info(dname)
        # get some options from saved config?
        po = Pool(name=p_info['label'], raid="unknown")
        # Fix: save the pool before pointing member disks at it, so the
        # foreign-key assignment below references a persisted row (as the
        # later variants of this method do).
        po.save()
        for d in p_info['disks']:
            do = Disk.objects.get(name=d)
            do.pool = po
            do.save()
        # NOTE(review): mounts via the last member disk iterated above;
        # raises NameError (caught below) if the pool reports no disks.
        mount_root(po, d)
        po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data']
        po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0]
        po.save()
        return Response(DiskInfoSerializer(disk).data)
    # Fix: use the Python 2.6+/3.x 'as' form instead of the removed
    # 'except Exception, e' syntax.
    except Exception as e:
        e_msg = ('Failed to import any pool on this device(%s). Error: %s'
                 % (dname, e.__str__()))
        handle_exception(Exception(e_msg), request)
def _refresh_pool_state():
    """Re-sync every db Pool record with its on-disk btrfs counterpart.

    Deletes pools with no member disks at all, skips pools whose members
    are all detached or whose lead device lacks a by-id name, and otherwise
    mounts each pool and refreshes its label, raid level and size from the
    live filesystem. Per-pool failures are logged and do not abort the
    sweep.
    """
    for p in Pool.objects.all():
        # If our pool has no disks, detached included, then delete it.
        # We leave pools with all detached members in place intentionally.
        if (p.disk_set.count() == 0):
            p.delete()
            continue
        # Log if no attached members are found, ie all devs are detached.
        if p.disk_set.attached().count() == 0:
            logger.error('Skipping Pool (%s) mount as there '
                         'are no attached devices. Moving on.' % p.name)
            continue
        try:
            # Get and save what info we can prior to mount.
            first_attached_dev = p.disk_set.attached().first()
            is_root_pool = (p.role == 'root')
            # Observe any redirect role by using target_name.
            byid_disk_name, is_byid = get_dev_byid_name(
                get_device_path(first_attached_dev.target_name))
            if is_byid:
                pool_info = get_pool_info(first_attached_dev.target_name,
                                          is_root_pool)
                pool_name = pool_info['label']
            else:
                # No stable by-id device name (likely no serial number):
                # refuse to mount rather than risk addressing the wrong dev.
                logger.error(
                    'Skipping pool ({}) mount as attached disk '
                    '({}) has no by-id name (no serial # ?)'.format(
                        p.name, first_attached_dev.target_name))
                continue
            # Persist the authoritative on-disk label before mounting.
            p.name = pool_name
            p.save()
            mount_root(p)
            p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data']
            p.size = p.usage_bound()
            # Consider using mount_status() parse to update root pool db on
            # active (fstab initiated) compression setting.
            p.save()
        except Exception as e:
            # Best-effort sweep: log and move on to the next pool.
            logger.error('Exception while refreshing state for '
                         'Pool(%s). Moving on: %s' % (p.name, e.__str__()))
            logger.exception(e)
if (command == 'disable-auto-update'): try: auto_update(enable=False) return Response({'enabled': False, }) except Exception, e: msg = ('Failed to disable auto update due to this exception: ' '%s' % e.__str__()) handle_exception(Exception(msg), request) if (command == 'refresh-pool-state'): for p in Pool.objects.all(): fd = p.disk_set.first() if (fd is None): p.delete() mount_root(p) pool_info = get_pool_info(fd.name) p.name = pool_info['label'] p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data'] p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0] p.save() return Response() if (command == 'refresh-share-state'): for p in Pool.objects.all(): import_shares(p, request) return Response() if (command == 'refresh-snapshot-state'): for share in Share.objects.all(): import_snapshots(share) return Response()
auto_update(enable=False) return Response({ 'enabled': False, }) except Exception, e: msg = ('Failed to disable auto update due to this exception: ' '%s' % e.__str__()) handle_exception(Exception(msg), request) if (command == 'refresh-pool-state'): for p in Pool.objects.all(): fd = p.disk_set.first() if (fd is None): p.delete() mount_root(p) pool_info = get_pool_info(fd.name) p.name = pool_info['label'] p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data'] p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0] p.save() return Response() if (command == 'refresh-share-state'): for p in Pool.objects.all(): import_shares(p, request) return Response() if (command == 'refresh-snapshot-state'): for share in Share.objects.all(): import_snapshots(share) return Response()