def dehydrate_volume_group(self, volume_group):
    """Return `VolumeGroup` formatted for JSON encoding."""
    # Compute the three sizes up front so each is fetched exactly once.
    total = volume_group.get_size()
    free = volume_group.get_lvm_free_space()
    allocated = volume_group.get_lvm_allocated_size()
    return {
        "id": volume_group.id,
        "name": volume_group.name,
        "tags": [],
        "type": volume_group.group_type,
        "path": "",
        "size": total,
        "size_human": human_readable_bytes(total),
        "used_size": allocated,
        "used_size_human": human_readable_bytes(allocated),
        "available_size": free,
        "available_size_human": human_readable_bytes(free),
        "block_size": volume_group.get_virtual_block_device_block_size(),
        "model": "",
        "serial": "",
        "partition_table_type": "",
        "used_for": "volume group",
        "filesystem": None,
        "partitions": None,
        "numa_nodes": volume_group.get_numa_node_indexes(),
    }
def dehydrate_cache_set(self, cache_set):
    """Return `CacheSet` formatted for JSON encoding."""
    backing = cache_set.get_device()
    used = backing.get_used_size()
    free = backing.get_available_size()
    # Names of the bcache devices built on top of this cache set, sorted
    # for a stable "used_for" display string.
    bcache_names = sorted(
        fsgroup.name for fsgroup in cache_set.filesystemgroup_set.all()
    )
    return {
        "id": cache_set.id,
        "name": cache_set.name,
        "tags": [],
        "type": "cache-set",
        "path": "",
        "size": backing.size,
        "size_human": human_readable_bytes(backing.size),
        "used_size": used,
        "used_size_human": human_readable_bytes(used),
        "available_size": free,
        "available_size_human": human_readable_bytes(free),
        "block_size": backing.get_block_size(),
        "model": "",
        "serial": "",
        "partition_table_type": "",
        "used_for": ", ".join(bcache_names),
        "filesystem": None,
        "partitions": None,
        "numa_nodes": cache_set.get_numa_node_indexes(),
    }
def dehydrate_blockdevice(self, blockdevice, obj):
    """Return `BlockDevice` formatted for JSON encoding.

    :param blockdevice: the `BlockDevice` to serialize.
    :param obj: the node owning the device; used to flag the boot disk.
    """
    # Model, serial and firmware version are currently only available on
    # physical block devices; virtual devices report empty strings.
    if isinstance(blockdevice, PhysicalBlockDevice):
        model = blockdevice.model
        serial = blockdevice.serial
        firmware_version = blockdevice.firmware_version
    else:
        serial = model = firmware_version = ""
    # Fetch the partition table once and reuse it below; the original
    # called get_partitiontable() a second time for dehydrate_partitions.
    partition_table = blockdevice.get_partitiontable()
    if partition_table is not None:
        partition_table_type = partition_table.table_type
    else:
        partition_table_type = ""
    # NOTE(review): assumes obj.get_boot_disk() never returns None here —
    # confirm for diskless nodes.
    is_boot = blockdevice.id == obj.get_boot_disk().id
    data = {
        "id": blockdevice.id,
        "is_boot": is_boot,
        "name": blockdevice.get_name(),
        "tags": blockdevice.tags,
        "type": blockdevice.type,
        "path": blockdevice.path,
        "size": blockdevice.size,
        "size_human": human_readable_bytes(blockdevice.size),
        "used_size": blockdevice.used_size,
        "used_size_human": human_readable_bytes(blockdevice.used_size),
        "available_size": blockdevice.available_size,
        "available_size_human": human_readable_bytes(
            blockdevice.available_size
        ),
        "block_size": blockdevice.block_size,
        "model": model,
        "serial": serial,
        "firmware_version": firmware_version,
        "partition_table_type": partition_table_type,
        "used_for": blockdevice.used_for,
        "filesystem": self.dehydrate_filesystem(
            blockdevice.get_effective_filesystem()
        ),
        "partitions": self.dehydrate_partitions(partition_table),
    }
    if isinstance(blockdevice, VirtualBlockDevice):
        data["parent"] = {
            "id": blockdevice.filesystem_group.id,
            "uuid": blockdevice.filesystem_group.uuid,
            "type": blockdevice.filesystem_group.group_type,
        }
    # Collect any script results recorded against this physical block
    # device (virtual devices never match) to compute its test status.
    blockdevice_script_results = [
        script_result
        for results in self._script_results.values()
        for script_results in results.values()
        for script_result in script_results
        if script_result.physical_blockdevice_id == blockdevice.id
    ]
    data["test_status"] = get_status_from_qs(blockdevice_script_results)
    return data
def test_GET(self):
    """A GET on a VMFS datastore returns its serialized details."""
    part = factory.make_Partition()
    name = factory.make_name('datastore')
    vmfs = VMFS.objects.create_vmfs(name, [part])
    response = self.client.get(self.get_vmfs_uri(vmfs))
    self.assertThat(response, HasStatusCode(http.client.OK))
    parsed_result = json_load_bytes(response.content)
    self.assertThat(
        parsed_result,
        ContainsDict({
            'id': Equals(vmfs.id),
            'system_id': Equals(vmfs.get_node().system_id),
            'uuid': Equals(vmfs.uuid),
            'name': Equals(vmfs.name),
            'size': Equals(vmfs.get_size()),
            'human_size': Equals(human_readable_bytes(vmfs.get_size())),
            'filesystem': Equals({
                'fstype': 'vmfs6',
                'mount_point': '/vmfs/volumes/%s' % name,
            }),
        }))
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(
        vmfs.filesystems.count(), len(parsed_result['devices']))
def dehydrate_partitions(self, partition_table):
    """Return `PartitionTable` formatted for JSON encoding."""
    if partition_table is None:
        return None
    # One serialized dict per partition in the table.
    return [
        {
            "filesystem": self.dehydrate_filesystem(
                partition.get_effective_filesystem()
            ),
            "name": partition.get_name(),
            "path": partition.path,
            "type": partition.type,
            "id": partition.id,
            "size": partition.size,
            "size_human": human_readable_bytes(partition.size),
            "used_for": partition.used_for,
            "tags": partition.tags,
        }
        for partition in partition_table.partitions.all()
    ]
def test_GET(self):
    """A GET on a VMFS datastore returns its serialized details."""
    part = factory.make_Partition()
    name = factory.make_name("datastore")
    vmfs = VMFS.objects.create_vmfs(name, [part])
    response = self.client.get(self.get_vmfs_uri(vmfs))
    self.assertThat(response, HasStatusCode(http.client.OK))
    parsed_result = json_load_bytes(response.content)
    self.assertThat(
        parsed_result,
        ContainsDict({
            "id": Equals(vmfs.id),
            "system_id": Equals(vmfs.get_node().system_id),
            "uuid": Equals(vmfs.uuid),
            "name": Equals(vmfs.name),
            "size": Equals(vmfs.get_size()),
            "human_size": Equals(human_readable_bytes(vmfs.get_size())),
            "filesystem": Equals({
                "fstype": "vmfs6",
                "mount_point": "/vmfs/volumes/%s" % name,
            }),
        }),
    )
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(
        vmfs.filesystems.count(), len(parsed_result["devices"]))
def __str__(self):
    """Describe the device by model, serial, size and owning node."""
    details = {
        "model": self.model,
        "serial": self.serial,
        "size": human_readable_bytes(self.size),
        "node": self.node,
    }
    return "{model} S/N {serial} {size} attached to {node}".format(
        **details
    )
def dehydrate_blockdevice(self, blockdevice, obj):
    """Return `BlockDevice` formatted for JSON encoding.

    :param blockdevice: the `BlockDevice` to serialize.
    :param obj: the node owning the device; used to flag the boot disk.
    """
    # Model and serial are currently only available on physical block
    # devices; virtual devices report empty strings.
    if isinstance(blockdevice, PhysicalBlockDevice):
        model = blockdevice.model
        serial = blockdevice.serial
    else:
        serial = model = ""
    # Fetch the partition table once and reuse it below; the original
    # called get_partitiontable() a second time for dehydrate_partitions.
    partition_table = blockdevice.get_partitiontable()
    if partition_table is not None:
        partition_table_type = partition_table.table_type
    else:
        partition_table_type = ""
    # NOTE(review): assumes obj.get_boot_disk() never returns None here —
    # confirm for diskless nodes.
    is_boot = blockdevice.id == obj.get_boot_disk().id
    data = {
        "id": blockdevice.id,
        "is_boot": is_boot,
        "name": blockdevice.get_name(),
        "tags": blockdevice.tags,
        "type": blockdevice.type,
        "path": blockdevice.path,
        "size": blockdevice.size,
        "size_human": human_readable_bytes(blockdevice.size),
        "used_size": blockdevice.used_size,
        "used_size_human": human_readable_bytes(blockdevice.used_size),
        "available_size": blockdevice.available_size,
        "available_size_human": human_readable_bytes(
            blockdevice.available_size
        ),
        "block_size": blockdevice.block_size,
        "model": model,
        "serial": serial,
        "partition_table_type": partition_table_type,
        "used_for": blockdevice.used_for,
        "filesystem": self.dehydrate_filesystem(
            blockdevice.get_effective_filesystem()
        ),
        "partitions": self.dehydrate_partitions(partition_table),
    }
    if isinstance(blockdevice, VirtualBlockDevice):
        data["parent"] = {
            "id": blockdevice.filesystem_group.id,
            "uuid": blockdevice.filesystem_group.uuid,
            "type": blockdevice.filesystem_group.group_type,
        }
    return data
def add_resource_template_attributes(self, resource):
    """Adds helper attributes to the resource."""
    resource.title = self.get_resource_title(resource)
    resource.arch, resource.subarch = resource.split_arch()
    resource.number_of_nodes = self.get_number_of_nodes_deployed_for(
        resource)
    resource_set = resource.get_latest_set()
    if resource_set is None:
        # Nothing downloaded yet for this resource.
        # NOTE(review): this branch does not set resource.icon, unlike
        # every other branch — confirm that is intended.
        resource.size = human_readable_bytes(0)
        resource.last_update = resource.updated
        resource.complete = False
        resource.status = "Queued for download"
        resource.downloading = False
        return
    resource.size = human_readable_bytes(resource_set.total_size)
    resource.last_update = resource_set.updated
    resource.complete = resource_set.complete
    if not resource.complete:
        progress = resource_set.progress
        if progress > 0:
            resource.status = "Downloading %3.0f%%" % progress
            resource.downloading = True
            resource.icon = 'in-progress'
        else:
            resource.status = "Queued for download"
            resource.downloading = False
            resource.icon = 'queued'
    elif resource in self.rack_resources:
        # Downloaded here and present on all the clusters.
        resource.status = "Synced"
        resource.downloading = False
        resource.icon = 'succeeded'
    else:
        # Downloaded here but the racks don't have it all yet.
        resource.complete = False
        if self.racks_syncing:
            resource.status = "Syncing to rack controller(s)"
            resource.downloading = True
            resource.icon = 'in-progress'
        else:
            resource.status = (
                "Waiting for rack controller(s) to sync")
            resource.downloading = False
            resource.icon = 'waiting'
def resource_group_to_resource(self, group):
    """Convert the list of resources into one resource to be used in
    the UI.

    :param group: list of related resources; the first element is
        mutated with the aggregated attributes and returned.
    """
    # Calculate all of the values using all of the resources for
    # this combination.
    last_update = self.get_last_update_for_resources(group)
    unique_size = self.calculate_unique_size_for_resources(group)
    number_of_nodes = self.get_number_of_nodes_for_resources(group)
    complete = self.are_all_resources_complete(group)
    progress = self.get_progress_for_resources(group)
    # Set the computed attributes on the first resource as that will
    # be the only one returned to the UI.
    resource = group[0]
    resource.arch, resource.subarch = resource.split_arch()
    resource.title = self.get_resource_title(resource)
    # Fixed: `resource.complete` was previously assigned twice with the
    # same value; assign it once.
    resource.complete = complete
    resource.size = human_readable_bytes(unique_size)
    resource.last_update = last_update
    resource.number_of_nodes = number_of_nodes
    if not complete:
        if progress > 0:
            resource.status = "Downloading %3.0f%%" % progress
            resource.downloading = True
            resource.icon = 'in-progress'
        else:
            resource.status = "Queued for download"
            resource.downloading = False
            resource.icon = 'queued'
    else:
        # See if all the resources exist on all the racks.
        rack_has_resources = any(
            res in group for res in self.rack_resources)
        if rack_has_resources:
            resource.status = "Synced"
            resource.downloading = False
            resource.icon = 'succeeded'
        else:
            resource.complete = False
            if self.racks_syncing:
                resource.status = "Syncing to rack controller(s)"
                resource.downloading = True
                resource.icon = 'in-progress'
            else:
                resource.status = "Waiting for rack controller(s) to sync"
                resource.downloading = False
                resource.icon = 'waiting'
    return resource
def test_read(self):
    """Reading a bcache device returns its serialized details."""
    node = factory.make_Node()
    cache_set = factory.make_CacheSet(node=node)
    backing_device = factory.make_PhysicalBlockDevice(node=node)
    backing_fs = factory.make_Filesystem(
        fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
        block_device=backing_device,
    )
    bcache = factory.make_FilesystemGroup(
        group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
        cache_set=cache_set,
        filesystems=[backing_fs],
    )
    response = self.client.get(get_bcache_device_uri(bcache))
    self.assertEqual(
        http.client.OK, response.status_code, response.content
    )
    parsed_bcache = json_load_bytes(response.content)
    expected = {
        "id": Equals(bcache.id),
        "uuid": Equals(bcache.uuid),
        "name": Equals(bcache.name),
        "size": Equals(bcache.get_size()),
        "human_size": Equals(
            human_readable_bytes(bcache.get_size())
        ),
        "resource_uri": Equals(get_bcache_device_uri(bcache)),
        "virtual_device": ContainsDict(
            {"id": Equals(bcache.virtual_device.id)}
        ),
        "cache_set": ContainsDict(
            {
                "id": Equals(cache_set.id),
                "name": Equals(cache_set.name),
            }
        ),
        "backing_device": ContainsDict(
            {"id": Equals(backing_device.id)}
        ),
        "system_id": Equals(bcache.get_node().system_id),
    }
    self.assertThat(parsed_bcache, ContainsDict(expected))
def test_cannot_save_if_size_larger_than_volume_group(self):
    """Requesting more than the free LVM space raises ValidationError."""
    vg = factory.make_FilesystemGroup(
        group_type=FILESYSTEM_GROUP_TYPE.LVM_VG)
    # Consume half the volume group first so the next request overflows.
    factory.make_VirtualBlockDevice(
        filesystem_group=vg, size=vg.get_size() / 2)
    requested_size = vg.get_size()
    expected_message = re.escape(
        "{'__all__': ['There is not enough free space (%s) "
        "on volume group %s.']}" % (
            human_readable_bytes(requested_size),
            vg.name,
        ))
    with ExpectedException(ValidationError, expected_message):
        factory.make_VirtualBlockDevice(
            filesystem_group=vg, size=requested_size)
def clean(self, *args, **kwargs): super(VirtualBlockDevice, self).clean(*args, **kwargs) # First time called the node might not be set, so we handle the # DoesNotExist exception accordingly. try: node = self.node except Node.DoesNotExist: # Set the node of this virtual block device, to the same node from # the attached filesystem group. fsgroup_node = self.filesystem_group.get_node() if fsgroup_node is not None: self.node = fsgroup_node else: # The node on the virtual block device must be the same node from # the attached filesystem group. if node != self.filesystem_group.get_node(): raise ValidationError( "Node must be the same node as the filesystem_group." ) # Check if the size of this is not larger than the free size of # its filesystem group if its lvm. if self.filesystem_group.is_lvm(): # align virtual partition to partition alignment size # otherwise on creation it may be rounded up, overfilling group self.size = round_size_to_nearest_block( self.size, PARTITION_ALIGNMENT_SIZE, False ) if self.size > self.filesystem_group.get_lvm_free_space( skip_volumes=[self] ): raise ValidationError( "There is not enough free space (%s) " "on volume group %s." % ( human_readable_bytes(self.size), self.filesystem_group.name, ) ) else: # If not a volume group the size of the virtual block device # must equal the size of the filesystem group. assert self.size == self.filesystem_group.get_size()
def human_used_size(cls, volume_group):
    """Render the volume group's allocated LVM size as a human string."""
    allocated = volume_group.get_lvm_allocated_size()
    return human_readable_bytes(allocated)
def human_size(cls, vmfs):
    """Render the VMFS datastore's total size as a human string."""
    total = vmfs.get_size()
    return human_readable_bytes(total)
def display_size(self, include_suffix=True):
    """Return this object's size formatted for display.

    :param include_suffix: whether to append the unit suffix.
    """
    formatted = human_readable_bytes(
        self.size, include_suffix=include_suffix)
    return formatted
def test_read(self):
    """Reading a RAID device returns its members and serialized details."""
    node = factory.make_Node()
    # Three active block-device members.
    active_devices = [
        factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
    ]
    active_device_ids = [device.id for device in active_devices]
    active_device_filesystems = [
        factory.make_Filesystem(
            fstype=FILESYSTEM_TYPE.RAID, block_device=device)
        for device in active_devices
    ]
    # Three spare block-device members.
    spare_devices = [
        factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
    ]
    spare_device_ids = [device.id for device in spare_devices]
    spare_device_filesystems = [
        factory.make_Filesystem(
            fstype=FILESYSTEM_TYPE.RAID_SPARE, block_device=device)
        for device in spare_devices
    ]
    # Three active partition members.
    active_partitions = [
        factory.make_Partition(
            partition_table=factory.make_PartitionTable(
                block_device=factory.make_PhysicalBlockDevice(node=node)))
        for _ in range(3)
    ]
    active_partition_ids = [
        partition.id for partition in active_partitions
    ]
    active_partition_filesystems = [
        factory.make_Filesystem(
            fstype=FILESYSTEM_TYPE.RAID, partition=partition)
        for partition in active_partitions
    ]
    # Three spare partition members.
    spare_partitions = [
        factory.make_Partition(
            partition_table=factory.make_PartitionTable(
                block_device=factory.make_PhysicalBlockDevice(node=node)))
        for _ in range(3)
    ]
    spare_partition_ids = [
        partition.id for partition in spare_partitions
    ]
    spare_partition_filesystems = [
        factory.make_Filesystem(
            fstype=FILESYSTEM_TYPE.RAID_SPARE, partition=partition)
        for partition in spare_partitions
    ]
    raid = factory.make_FilesystemGroup(
        group_type=FILESYSTEM_GROUP_TYPE.RAID_5,
        filesystems=(
            active_device_filesystems
            + spare_device_filesystems
            + active_partition_filesystems
            + spare_partition_filesystems
        ))
    response = self.client.get(get_raid_device_uri(raid))
    self.assertEqual(
        http.client.OK, response.status_code, response.content)
    parsed_raid = json.loads(
        response.content.decode(settings.DEFAULT_CHARSET))
    parsed_device_ids = [
        device["id"] for device in parsed_raid["devices"]
    ]
    parsed_spare_device_ids = [
        device["id"] for device in parsed_raid["spare_devices"]
    ]
    self.assertThat(
        parsed_raid,
        ContainsDict({
            "id": Equals(raid.id),
            "uuid": Equals(raid.uuid),
            "name": Equals(raid.name),
            "level": Equals(raid.group_type),
            "size": Equals(raid.get_size()),
            "human_size": Equals(human_readable_bytes(raid.get_size())),
            "resource_uri": Equals(get_raid_device_uri(raid)),
            "system_id": Equals(node.system_id),
        }))
    self.assertItemsEqual(
        active_device_ids + active_partition_ids, parsed_device_ids)
    self.assertItemsEqual(
        spare_device_ids + spare_partition_ids,
        parsed_spare_device_ids)
    self.assertEqual(
        raid.virtual_device.id, parsed_raid["virtual_device"]["id"])
def test_read(self):
    """Reading a volume group returns its devices and logical volumes."""
    node = factory.make_Node()
    # Three physical-volume block devices.
    backing_devices = [
        factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
    ]
    backing_device_ids = [device.id for device in backing_devices]
    device_filesystems = [
        factory.make_Filesystem(
            fstype=FILESYSTEM_TYPE.LVM_PV, block_device=device)
        for device in backing_devices
    ]
    # Three physical-volume partitions.
    backing_partitions = [
        factory.make_Partition(
            partition_table=factory.make_PartitionTable(
                block_device=factory.make_PhysicalBlockDevice(node=node)))
        for _ in range(3)
    ]
    backing_partition_ids = [
        partition.id for partition in backing_partitions
    ]
    partition_filesystems = [
        factory.make_Filesystem(
            fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition)
        for partition in backing_partitions
    ]
    volume_group = factory.make_FilesystemGroup(
        group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
        filesystems=device_filesystems + partition_filesystems,
    )
    logical_volume_ids = [
        factory.make_VirtualBlockDevice(
            filesystem_group=volume_group, size=device.size).id
        for device in backing_devices
    ]
    response = self.client.get(get_volume_group_uri(volume_group))
    self.assertEqual(
        http.client.OK, response.status_code, response.content)
    parsed_volume_group = json.loads(
        response.content.decode(settings.DEFAULT_CHARSET))
    parsed_device_ids = [
        device["id"] for device in parsed_volume_group["devices"]
    ]
    parsed_logical_volume_ids = [
        volume["id"]
        for volume in parsed_volume_group["logical_volumes"]
    ]
    self.assertThat(
        parsed_volume_group,
        ContainsDict({
            "id": Equals(volume_group.id),
            "uuid": Equals(volume_group.uuid),
            "name": Equals(volume_group.name),
            "size": Equals(volume_group.get_size()),
            "human_size": Equals(
                human_readable_bytes(volume_group.get_size())),
            "available_size": Equals(
                volume_group.get_lvm_free_space()),
            "human_available_size": Equals(
                human_readable_bytes(
                    volume_group.get_lvm_free_space())),
            "used_size": Equals(
                volume_group.get_lvm_allocated_size()),
            "human_used_size": Equals(
                human_readable_bytes(
                    volume_group.get_lvm_allocated_size())),
            "resource_uri": Equals(
                get_volume_group_uri(volume_group, test_plural=False)),
            "system_id": Equals(node.system_id),
        }),
    )
    self.assertItemsEqual(
        backing_device_ids + backing_partition_ids, parsed_device_ids)
    self.assertItemsEqual(
        logical_volume_ids, parsed_logical_volume_ids)
def test_returns_size_with_suffix(self):
    """The default rendering appends the unit suffix."""
    expected = "%s %s" % (self.output, self.suffix)
    self.assertEqual(expected, human_readable_bytes(self.size))
def human_size(cls, raid):
    """Render the RAID's total size as a human string."""
    total = raid.get_size()
    return human_readable_bytes(total)
def human_size(cls, bcache):
    """Render the bcache's total size as a human string."""
    total = bcache.get_size()
    return human_readable_bytes(total)
def human_available_size(cls, volume_group):
    """Render the volume group's free LVM space as a human string."""
    free = volume_group.get_lvm_free_space()
    return human_readable_bytes(free)
def __str__(self):
    """Describe the partition by size and owning block device."""
    # Use str(...) instead of calling __str__() directly — calling the
    # dunder explicitly is unidiomatic; behavior is identical.
    return "{size} partition on {bd}".format(
        size=human_readable_bytes(self.size),
        bd=str(self.partition_table.block_device),
    )
def test_returns_size_without_suffix(self):
    """With include_suffix=False the unit suffix is omitted."""
    actual = human_readable_bytes(self.size, include_suffix=False)
    self.assertEqual(self.output, actual)
def __str__(self):
    """Describe the device by size and owning node."""
    size = human_readable_bytes(self.size)
    return "{size} attached to {node}".format(size=size, node=self.node)
def human_size(cls, filesystem_group):
    """Render the filesystem group's total size as a human string."""
    total = filesystem_group.get_size()
    return human_readable_bytes(total)