def test_get_counters(self): disks = [(virt_inspector.Disk(device='vda'), virt_inspector.DiskStats(read_bytes=1L, read_requests=2L, write_bytes=3L, write_requests=4L, errors=-1L))] self.inspector.inspect_disks(self.instance.name).AndReturn(disks) self.mox.ReplayAll() mgr = manager.AgentManager() pollster = pollsters.DiskIOPollster() counters = list(pollster.get_counters(mgr, self.instance)) assert counters self.assertEqual(set([c.name for c in counters]), set(pollster.get_counter_names())) def _verify_disk_metering(name, expected_volume): match = [c for c in counters if c.name == name] self.assertEquals(len(match), 1, 'missing counter %s' % name) self.assertEquals(match[0].volume, expected_volume) self.assertEquals(match[0].type, 'cumulative') _verify_disk_metering('disk.read.requests', 2L) _verify_disk_metering('disk.read.bytes', 1L) _verify_disk_metering('disk.write.requests', 4L) _verify_disk_metering('disk.write.bytes', 3L)
def inspect_disk_info(self, instance):
    """Yield (Disk, DiskInfo) for each inspectable disk of *instance*.

    Network-backed disks are skipped (unsupported by libvirt), as are
    devices without a <source> element (configdrive cdroms).
    """
    domain = self._get_domain_not_shut_off_or_raise(instance)
    xml_root = etree.fromstring(domain.XMLDesc(0))
    for disk_elem in xml_root.findall('devices/disk'):
        kind = disk_elem.get('type')
        if kind:
            if kind == 'network':
                LOG.warning(
                    _LW('Inspection disk usage of network disk '
                        '%(instance_uuid)s unsupported by libvirt')
                    % {'instance_uuid': instance.id})
                continue
            # NOTE(lhx): a "cdrom" device attached for the configdrive
            # no longer carries a "source" element. Related bug:
            # https://bugs.launchpad.net/ceilometer/+bug/1622718
            if disk_elem.find('source') is None:
                continue
        device = disk_elem.find('target').get('dev')
        if not device:
            continue
        block_info = domain.blockInfo(device)
        yield (virt_inspector.Disk(device=device),
               virt_inspector.DiskInfo(capacity=block_info[0],
                                       allocation=block_info[1],
                                       physical=block_info[2]))
def inspect_disk_rates(self, instance, duration=None):
    """Yield (Disk, DiskRateStats) per virtual disk of *instance*.

    Collects the four vSphere disk rate counters, merges the per-device
    maps, and emits one DiskRateStats per device seen by any counter.
    """
    vm_mobj = self._get_vm_mobj_not_power_off_or_raise(instance)
    counters = [VC_DISK_READ_RATE_CNTR,
                VC_DISK_READ_REQUESTS_RATE_CNTR,
                VC_DISK_WRITE_RATE_CNTR,
                VC_DISK_WRITE_REQUESTS_RATE_CNTR]
    stats_by_counter = {}
    all_disk_ids = set()
    for counter in counters:
        counter_id = self._ops.get_perf_counter_id(counter)
        per_disk = self._ops.query_vm_device_stats(
            vm_mobj, counter_id, duration)
        stats_by_counter[counter] = per_disk
        all_disk_ids.update(six.iterkeys(per_disk))

    for disk_id in all_disk_ids:

        def _value(counter):
            # Devices missing from a counter's map default to 0.
            return stats_by_counter[counter].get(disk_id, 0)

        # Stats provided from vSphere are in KB/s, converting it to B/s.
        rate_stats = virt_inspector.DiskRateStats(
            read_bytes_rate=_value(VC_DISK_READ_RATE_CNTR) * units.Ki,
            read_requests_rate=_value(VC_DISK_READ_REQUESTS_RATE_CNTR),
            write_bytes_rate=_value(VC_DISK_WRITE_RATE_CNTR) * units.Ki,
            write_requests_rate=_value(VC_DISK_WRITE_REQUESTS_RATE_CNTR))
        yield (virt_inspector.Disk(device=disk_id), rate_stats)
def inspect_disk_rates(self, instance, duration=None):
    """Yield (Disk, DiskRateStats) per virtual disk of *instance*.

    :param instance: the target instance
    :param duration: sampling window, passed through to the perf query
    :raises InstanceNotFoundException: if the VM is unknown to vSphere
    """
    vm_moid = self._ops.get_vm_moid(instance.id)
    if not vm_moid:
        raise virt_inspector.InstanceNotFoundException(
            _('VM %s not found in VMware Vsphere') % instance.id)
    disk_stats = {}
    disk_ids = set()
    disk_counters = [
        VC_DISK_READ_RATE_CNTR,
        VC_DISK_READ_REQUESTS_RATE_CNTR,
        VC_DISK_WRITE_RATE_CNTR,
        VC_DISK_WRITE_REQUESTS_RATE_CNTR
    ]
    for disk_counter in disk_counters:
        disk_counter_id = self._ops.get_perf_counter_id(disk_counter)
        disk_id_to_stat_map = self._ops.query_vm_device_stats(
            vm_moid, disk_counter_id, duration)
        disk_stats[disk_counter] = disk_id_to_stat_map
        # Iterating a dict yields its keys; dict.iterkeys() is
        # Python-2-only and breaks under Python 3.
        disk_ids.update(disk_id_to_stat_map)

    for disk_id in disk_ids:

        def stat_val(counter_name):
            # Devices missing from a counter's map default to 0.
            return disk_stats[counter_name].get(disk_id, 0)

        disk = virt_inspector.Disk(device=disk_id)
        # Stats provided from vSphere are in KB/s, converting it to B/s.
        disk_rate_info = virt_inspector.DiskRateStats(
            read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki,
            read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR),
            write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki,
            write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR))
        yield (disk, disk_rate_info)
def inspect_disk_iops(self, instance):
    """Yield (Disk, DiskIOPSStats) for each disk of *instance*."""
    name = util.instance_name(instance)
    for metrics in self._utils.get_disk_iops_count(name):
        yield (virt_inspector.Disk(device=metrics['instance_id']),
               virt_inspector.DiskIOPSStats(
                   iops_count=metrics['iops_count']))
class TestDiskRatePollsters(base.TestPollsterBase):
    """Tests for the gauge disk-rate pollsters against a mocked inspector."""

    # Two fake devices; DiskRateStats fields are positional:
    # (read_bytes_rate, read_requests_rate, write_bytes_rate,
    #  write_requests_rate).
    DISKS = [(virt_inspector.Disk(device='disk1'),
              virt_inspector.DiskRateStats(1024, 300, 5120, 700)),
             (virt_inspector.Disk(device='disk2'),
              virt_inspector.DiskRateStats(2048, 400, 6144, 800))]

    def setUp(self):
        """Stub the inspector so every pollster sees the fixed DISKS."""
        super(TestDiskRatePollsters, self).setUp()
        self.inspector.inspect_disk_rates = \
            mock.Mock(return_value=self.DISKS)

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, sample_name, expected_volume):
        """Run one pollster and verify its single aggregated gauge sample.

        :param factory: pollster class to instantiate
        :param sample_name: expected sample name
        :param expected_volume: expected aggregated volume across DISKS
        """
        pollster = factory()

        mgr = manager.AgentManager()
        cache = {}
        samples = list(pollster.get_samples(mgr, cache, [self.instance]))
        self.assertIsNotEmpty(samples)
        self.assertIsNotNone(samples)
        # The pollster must populate its per-instance disk-rate cache.
        self.assertIn(pollster.CACHE_KEY_DISK_RATE, cache)
        self.assertIn(self.instance.id, cache[pollster.CACHE_KEY_DISK_RATE])

        self.assertEqual(set([sample_name]), set([s.name for s in samples]))

        match = [s for s in samples if s.name == sample_name]
        self.assertEqual(1, len(match), 'missing counter %s' % sample_name)
        self.assertEqual(expected_volume, match[0].volume)
        self.assertEqual('gauge', match[0].type)

    def test_disk_read_bytes_rate(self):
        # 1024 + 2048 across both devices.
        self._check_get_samples(disk.ReadBytesRatePollster,
                                'disk.read.bytes.rate', 3072L)

    def test_disk_read_requests_rate(self):
        # 300 + 400 across both devices.
        self._check_get_samples(disk.ReadRequestsRatePollster,
                                'disk.read.requests.rate', 700L)

    def test_disk_write_bytes_rate(self):
        # 5120 + 6144 across both devices.
        self._check_get_samples(disk.WriteBytesRatePollster,
                                'disk.write.bytes.rate', 11264L)

    def test_disk_write_requests_rate(self):
        # 700 + 800 across both devices.
        self._check_get_samples(disk.WriteRequestsRatePollster,
                                'disk.write.requests.rate', 1500L)
def inspect_disk_latency(self, instance):
    """Yield (Disk, DiskLatencyStats) for each disk of *instance*."""
    name = util.instance_name(instance)
    for metrics in self._utils.get_disk_latency_metrics(name):
        yield (virt_inspector.Disk(device=metrics['instance_id']),
               virt_inspector.DiskLatencyStats(
                   disk_latency=metrics['disk_latency']))
class TestDiskLatencyPollsters(TestBaseDiskIO):
    """Tests for aggregate and per-device disk latency pollsters."""

    # Two fake devices with fixed latency stats.
    DISKS = [(virt_inspector.Disk(device='disk1'),
              virt_inspector.DiskLatencyStats(1000)),
             (virt_inspector.Disk(device='disk2'),
              virt_inspector.DiskLatencyStats(2000))]
    # Latency samples are gauges.
    TYPE = 'gauge'

    def setUp(self):
        """Stub the inspector to return the fixed DISKS fixture."""
        super(TestDiskLatencyPollsters, self).setUp()
        self.inspector.inspect_disk_latency = mock.Mock(
            return_value=self.DISKS)

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, sample_name, expected_count=2):
        """Run one pollster and return the matching samples.

        :param factory: pollster class to instantiate
        :param sample_name: expected sample name
        :param expected_count: number of samples expected for that name
        """
        pollster = factory()

        mgr = manager.AgentManager()
        cache = {}
        samples = list(pollster.get_samples(mgr, cache, self.instance))
        self.assertIsNotNone(samples)
        self.assertIsNotEmpty(samples)
        # The pollster must cache latency data per instance id.
        self.assertIn(pollster.CACHE_KEY_DISK_LATENCY, cache)
        for instance in self.instance:
            self.assertIn(instance.id,
                          cache[pollster.CACHE_KEY_DISK_LATENCY])

        self.assertEqual(set([sample_name]),
                         set([s.name for s in samples]))

        match = [s for s in samples if s.name == sample_name]
        self.assertEqual(expected_count, len(match),
                         'missing counter %s' % sample_name)
        return match

    def test_disk_latency(self):
        # NOTE(review): expected aggregate is 3 for inputs 1000/2000 —
        # presumably the base helper converts units; confirm in
        # TestBaseDiskIO._check_aggregate_samples.
        self._check_aggregate_samples(disk.DiskLatencyPollster,
                                      'disk.latency', 3)

    def test_per_device_latency(self):
        self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster,
                                       'disk.device.latency', 1, 'disk1')

        self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster,
                                       'disk.device.latency', 2, 'disk2')
def inspect_disks(self, instance_name):
    """Yield (Disk, DiskStats) for each disk of the named instance.

    The underlying metrics report megabytes transferred ('read_mb' /
    'write_mb'); these are scaled to bytes. Request and error counts
    are not provided by this source and are reported as zero.
    """
    for metrics in self._utils.get_disk_metrics(instance_name):
        device = virt_inspector.Disk(device=metrics['instance_id'])
        stats = virt_inspector.DiskStats(
            read_requests=0,
            read_bytes=metrics['read_mb'] * units.Mi,
            write_requests=0,
            write_bytes=metrics['write_mb'] * units.Mi,
            errors=0)
        yield (device, stats)
class TestDiskLatencyPollsters(TestBaseDiskIO):
    """Tests for aggregate and per-device disk latency pollsters."""

    # Two fake devices with fixed latency stats.
    DISKS = [
        (virt_inspector.Disk(device=dev),
         virt_inspector.DiskLatencyStats(latency))
        for dev, latency in [('disk1', 1000), ('disk2', 2000)]
    ]
    TYPE = 'gauge'
    CACHE_KEY = "CACHE_KEY_DISK_LATENCY"

    def setUp(self):
        """Stub the inspector to return the fixed DISKS fixture."""
        super(TestDiskLatencyPollsters, self).setUp()
        self.inspector.inspect_disk_latency = mock.Mock(
            return_value=self.DISKS)

    def test_disk_latency(self):
        # Aggregate across both devices.
        self._check_aggregate_samples(disk.DiskLatencyPollster,
                                      'disk.latency', 3)

    def test_per_device_latency(self):
        for dev, value in (('disk1', 1), ('disk2', 2)):
            self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster,
                                           'disk.device.latency',
                                           value, dev)
def inspect_disk_info(self, instance):
    """Yield (Disk, DiskInfo) for every block device of *instance*."""
    domain = self._get_domain_not_shut_off_or_raise(instance)
    xml_root = etree.fromstring(domain.XMLDesc(0))
    for target in xml_root.findall('devices/disk/target'):
        device = target.get("dev")
        if not device:
            # Skip targets with no device name.
            continue
        info = domain.blockInfo(device)
        yield (virt_inspector.Disk(device=device),
               virt_inspector.DiskInfo(capacity=info[0],
                                       allocation=info[1],
                                       physical=info[2]))
def inspect_disks(self, instance_name):
    """Yield (Disk, DiskStats) for each block device of the domain."""
    domain = self._lookup_by_name(instance_name)
    xml_root = etree.fromstring(domain.XMLDesc(0))
    for target in xml_root.findall('devices/disk/target'):
        device = target.get("dev")
        if not device:
            # Skip targets with no device name.
            continue
        block_stats = domain.blockStats(device)
        yield (virt_inspector.Disk(device=device),
               virt_inspector.DiskStats(read_requests=block_stats[0],
                                        read_bytes=block_stats[1],
                                        write_requests=block_stats[2],
                                        write_bytes=block_stats[3],
                                        errors=block_stats[4]))
class TestDiskIOPSPollsters(TestBaseDiskIO):
    """Tests for aggregate and per-device disk IOPS pollsters."""

    # Two fake devices with fixed IOPS counts.
    DISKS = [
        (virt_inspector.Disk(device=dev),
         virt_inspector.DiskIOPSStats(iops))
        for dev, iops in [('disk1', 10), ('disk2', 20)]
    ]
    TYPE = 'gauge'
    CACHE_KEY = "CACHE_KEY_DISK_IOPS"

    def setUp(self):
        """Stub the inspector to return the fixed DISKS fixture."""
        super(TestDiskIOPSPollsters, self).setUp()
        self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS)

    def test_disk_iops(self):
        # Aggregate sums both devices: 10 + 20.
        self._check_aggregate_samples(disk.DiskIOPSPollster,
                                      'disk.iops', 30)

    def test_per_device_iops(self):
        for dev, value in (('disk1', 10), ('disk2', 20)):
            self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster,
                                           'disk.device.iops', value, dev)
def inspect_disks(self, instance_name):
    """Yield (Disk, DiskStats) for each disk of the named instance.

    Request and error counts are not provided by this source and are
    reported as zero; only bytes read/written are populated.
    """
    for disk_metrics in self._utils.get_disk_metrics(instance_name):
        # Keep only the identifying keys that are actually present.
        device = dict([(i, disk_metrics[i])
                       for i in ['instance_id', 'host_resource']
                       if i in disk_metrics])

        disk = virt_inspector.Disk(device=device)
        # The source reports megabytes ('read_mb'/'write_mb'); convert to
        # bytes. The previous single factor of 1024 only produced KiB.
        stats = virt_inspector.DiskStats(
            read_requests=0,
            read_bytes=disk_metrics['read_mb'] * 1024 * 1024,
            write_requests=0,
            write_bytes=disk_metrics['write_mb'] * 1024 * 1024,
            errors=0)
        yield (disk, stats)
class TestDiskPollsters(base.TestPollsterBase): DISKS = [(virt_inspector.Disk(device='vda'), virt_inspector.DiskStats(read_bytes=1L, read_requests=2L, write_bytes=3L, write_requests=4L, errors=-1L))] def setUp(self): super(TestDiskPollsters, self).setUp() self.inspector.inspect_disks(self.instance.name).AndReturn(self.DISKS) self.mox.ReplayAll() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, name, expected_volume): pollster = factory() mgr = manager.AgentManager() cache = {} samples = list(pollster.get_samples(mgr, cache, self.instance)) assert samples assert pollster.CACHE_KEY_DISK in cache assert self.instance.name in cache[pollster.CACHE_KEY_DISK] self.assertEqual(set([s.name for s in samples]), set([name])) match = [s for s in samples if s.name == name] self.assertEquals(len(match), 1, 'missing counter %s' % name) self.assertEquals(match[0].volume, expected_volume) self.assertEquals(match[0].type, 'cumulative') def test_disk_read_requests(self): self._check_get_samples(disk.ReadRequestsPollster, 'disk.read.requests', 2L) def test_disk_read_bytes(self): self._check_get_samples(disk.ReadBytesPollster, 'disk.read.bytes', 1L) def test_disk_write_requests(self): self._check_get_samples(disk.WriteRequestsPollster, 'disk.write.requests', 4L) def test_disk_write_bytes(self): self._check_get_samples(disk.WriteBytesPollster, 'disk.write.bytes', 3L)
def inspect_disks(self, instance):
    """Inspect the disk statistics for an instance.

    The response is a generator of the values.

    :param instance: the target instance
    :return disk: The Disk indicating the device for the storage device.
    :return stats: The DiskStats indicating the read/write data to the
                   device.
    """
    uuid = self._puuid(instance)
    cur_date, cur_metric = self.vm_metrics.get_latest_metric(uuid)

    # No metric for the instance means it is not in the sample at all.
    if cur_metric is None:
        raise virt_inspector.InstanceNotFoundException(
            _('VM %s not found in PowerVM Metrics Sample.') % instance.name)

    # Missing storage data indicates the Virtual I/O Metrics were
    # turned off; nothing can be reported.
    if cur_metric.storage is None:
        LOG.debug("Current storage metric was unavailable from the API "
                  "instance %s." % instance.name)
        return

    # SCSI and virtual FC adapters together; PowerVM only exposes the
    # connection, so the disk is named after the adapter.
    for adapter in (cur_metric.storage.virt_adpts +
                    cur_metric.storage.vfc_adpts):
        yield (virt_inspector.Disk(device=adapter.name),
               virt_inspector.DiskStats(read_requests=adapter.num_reads,
                                        read_bytes=adapter.read_bytes,
                                        write_requests=adapter.num_writes,
                                        write_bytes=adapter.write_bytes,
                                        errors=0))
def inspect_disk_info(self, instance):
    """Yield (Disk, DiskInfo) for each inspectable disk of *instance*.

    Network-backed disks are skipped (unsupported by libvirt), as are
    disks without a "source" element such as the configdrive cdrom,
    for which domain.blockInfo() would fail.
    """
    domain = self._get_domain_not_shut_off_or_raise(instance)
    tree = etree.fromstring(domain.XMLDesc(0))
    for disk in tree.findall('devices/disk'):
        disk_type = disk.get('type')
        if disk_type:
            if disk_type == 'network':
                LOG.warning(
                    _LW('Inspection disk usage of network disk '
                        '%(instance_uuid)s unsupported by libvirt')
                    % {'instance_uuid': instance.id})
                continue
            # A "cdrom" device associated with the configdrive has no
            # "source" element; skip it to avoid blockInfo failures.
            # See https://bugs.launchpad.net/ceilometer/+bug/1622718
            if disk.find('source') is None:
                continue
        target = disk.find('target')
        device = target.get('dev')
        if device:
            dsk = virt_inspector.Disk(device=device)
            block_info = domain.blockInfo(device)
            info = virt_inspector.DiskInfo(capacity=block_info[0],
                                           allocation=block_info[1],
                                           physical=block_info[2])
            yield (dsk, info)
def inspect_disks(self, instance_name):
    """Yield (Disk, DiskStats) for each block device of the domain.

    Emits nothing when the domain is shut off, since block statistics
    cannot be collected in that state.
    """
    domain = self._lookup_by_name(instance_name)
    if domain.info()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
        LOG.warn(_('Failed to inspect disks of %(instance_name)s, '
                   'domain is in state of SHUTOFF'),
                 {'instance_name': instance_name})
        return
    xml_root = etree.fromstring(domain.XMLDesc(0))
    for target in xml_root.findall('devices/disk/target'):
        device = target.get("dev")
        if not device:
            # Skip targets with no device name.
            continue
        block_stats = domain.blockStats(device)
        yield (virt_inspector.Disk(device=device),
               virt_inspector.DiskStats(read_requests=block_stats[0],
                                        read_bytes=block_stats[1],
                                        write_requests=block_stats[2],
                                        write_bytes=block_stats[3],
                                        errors=block_stats[4]))
def inspect_disk_rates(self, instance, duration=None):
    """Yield (Disk, DiskRateStats) for each VBD of *instance*.

    Request rates are not exposed by this data source and are
    reported as 0.
    """
    vm_ref = self._lookup_by_name(util.instance_name(instance))
    vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref)
    if not vbd_refs:
        return
    for vbd_ref in vbd_refs:
        vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref)
        device = vbd_rec['device']
        read_rate = float(self._call_xenapi(
            "VM.query_data_source", vm_ref, "vbd_%s_read" % device))
        write_rate = float(self._call_xenapi(
            "VM.query_data_source", vm_ref, "vbd_%s_write" % device))
        yield (virt_inspector.Disk(device=device),
               virt_inspector.DiskRateStats(read_bytes_rate=read_rate,
                                            read_requests_rate=0,
                                            write_bytes_rate=write_rate,
                                            write_requests_rate=0))
def inspect_disk_rates(self, instance, duration=None):
    """Yield (Disk, DiskRateStats) for each VBD of *instance*.

    Request rates are not exposed by VBD metrics and are reported as 0.
    """
    vm_ref = self._lookup_by_name(util.instance_name(instance))
    vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref)
    if not vbd_refs:
        return
    for vbd_ref in vbd_refs:
        vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref)
        metrics_ref = self._call_xenapi("VBD.get_metrics", vbd_ref)
        metrics_rec = self._call_xenapi("VBD_metrics.get_record",
                                        metrics_ref)
        # XenServer reports KB/s; scale to B/s.
        read_rate = float(metrics_rec['io_read_kbs']) * units.Ki
        write_rate = float(metrics_rec['io_write_kbs']) * units.Ki
        yield (virt_inspector.Disk(device=vbd_rec['device']),
               virt_inspector.DiskRateStats(read_bytes_rate=read_rate,
                                            read_requests_rate=0,
                                            write_bytes_rate=write_rate,
                                            write_requests_rate=0))
class TestDiskPollsters(TestBaseDiskIO):
    """Tests for cumulative disk I/O pollsters, aggregate and per-device."""

    # Two fake devices with fixed cumulative stats.
    DISKS = [
        (virt_inspector.Disk(device='vda1'),
         virt_inspector.DiskStats(read_bytes=1L, read_requests=2L,
                                  write_bytes=3L, write_requests=4L,
                                  errors=-1L)),
        (virt_inspector.Disk(device='vda2'),
         virt_inspector.DiskStats(read_bytes=2L, read_requests=3L,
                                  write_bytes=5L, write_requests=7L,
                                  errors=-1L)),
    ]

    def setUp(self):
        """Stub the inspector to return the fixed DISKS fixture."""
        super(TestDiskPollsters, self).setUp()
        self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS)

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, name, expected_count=2):
        """Run one pollster and return the matching samples.

        :param factory: pollster class to instantiate
        :param name: expected sample name
        :param expected_count: number of samples expected for that name
        """
        pollster = factory()

        mgr = manager.AgentManager()
        cache = {}
        samples = list(pollster.get_samples(mgr, cache, self.instance))
        self.assertIsNotEmpty(samples)
        # The pollster must cache disk data per instance name.
        self.assertIn(pollster.CACHE_KEY_DISK, cache)
        for instance in self.instance:
            self.assertIn(instance.name, cache[pollster.CACHE_KEY_DISK])

        self.assertEqual(set([name]), set([s.name for s in samples]))

        match = [s for s in samples if s.name == name]
        self.assertEqual(len(match), expected_count,
                         'missing counter %s' % name)
        return match

    def test_disk_read_requests(self):
        # 2 + 3 across the two devices.
        self._check_aggregate_samples(disk.ReadRequestsPollster,
                                      'disk.read.requests', 5L,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_read_bytes(self):
        # 1 + 2 across the two devices.
        self._check_aggregate_samples(disk.ReadBytesPollster,
                                      'disk.read.bytes', 3L,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_write_requests(self):
        # 4 + 7 across the two devices.
        self._check_aggregate_samples(disk.WriteRequestsPollster,
                                      'disk.write.requests', 11L,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_write_bytes(self):
        # 3 + 5 across the two devices.
        self._check_aggregate_samples(disk.WriteBytesPollster,
                                      'disk.write.bytes', 8L,
                                      expected_device=['vda1', 'vda2'])

    def test_per_disk_read_requests(self):
        self._check_per_device_samples(disk.PerDeviceReadRequestsPollster,
                                       'disk.device.read.requests',
                                       2L, 'vda1')
        self._check_per_device_samples(disk.PerDeviceReadRequestsPollster,
                                       'disk.device.read.requests',
                                       3L, 'vda2')

    def test_per_disk_write_requests(self):
        self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster,
                                       'disk.device.write.requests',
                                       4L, 'vda1')
        self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster,
                                       'disk.device.write.requests',
                                       7L, 'vda2')

    def test_per_disk_read_bytes(self):
        self._check_per_device_samples(disk.PerDeviceReadBytesPollster,
                                       'disk.device.read.bytes',
                                       1L, 'vda1')
        self._check_per_device_samples(disk.PerDeviceReadBytesPollster,
                                       'disk.device.read.bytes',
                                       2L, 'vda2')

    def test_per_disk_write_bytes(self):
        self._check_per_device_samples(disk.PerDeviceWriteBytesPollster,
                                       'disk.device.write.bytes',
                                       3L, 'vda1')
        self._check_per_device_samples(disk.PerDeviceWriteBytesPollster,
                                       'disk.device.write.bytes',
                                       5L, 'vda2')
class TestDiskRatePollsters(TestBaseDiskIO):
    """Tests for gauge disk-rate pollsters, aggregate and per-device."""

    # Two fake devices; DiskRateStats fields are positional:
    # (read_bytes_rate, read_requests_rate, write_bytes_rate,
    #  write_requests_rate).
    DISKS = [(virt_inspector.Disk(device='disk1'),
              virt_inspector.DiskRateStats(1024, 300, 5120, 700)),
             (virt_inspector.Disk(device='disk2'),
              virt_inspector.DiskRateStats(2048, 400, 6144, 800))]
    # Rate samples are gauges.
    TYPE = 'gauge'

    def setUp(self):
        """Stub the inspector to return the fixed DISKS fixture."""
        super(TestDiskRatePollsters, self).setUp()
        self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS)

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, sample_name, expected_count=2):
        """Run one pollster and return the matching samples.

        :param factory: pollster class to instantiate
        :param sample_name: expected sample name
        :param expected_count: number of samples expected for that name
        """
        pollster = factory()

        mgr = manager.AgentManager()
        cache = {}
        samples = list(pollster.get_samples(mgr, cache, self.instance))
        self.assertIsNotEmpty(samples)
        self.assertIsNotNone(samples)
        # The pollster must cache rate data per instance id.
        self.assertIn(pollster.CACHE_KEY_DISK_RATE, cache)
        for instance in self.instance:
            self.assertIn(instance.id, cache[pollster.CACHE_KEY_DISK_RATE])

        self.assertEqual(set([sample_name]), set([s.name for s in samples]))

        match = [s for s in samples if s.name == sample_name]
        self.assertEqual(expected_count, len(match),
                         'missing counter %s' % sample_name)
        return match

    def test_disk_read_bytes_rate(self):
        # 1024 + 2048 across both devices.
        self._check_aggregate_samples(disk.ReadBytesRatePollster,
                                      'disk.read.bytes.rate', 3072L,
                                      expected_device=['disk1', 'disk2'])

    def test_disk_read_requests_rate(self):
        # 300 + 400 across both devices.
        self._check_aggregate_samples(disk.ReadRequestsRatePollster,
                                      'disk.read.requests.rate', 700L,
                                      expected_device=['disk1', 'disk2'])

    def test_disk_write_bytes_rate(self):
        # 5120 + 6144 across both devices.
        self._check_aggregate_samples(disk.WriteBytesRatePollster,
                                      'disk.write.bytes.rate', 11264L,
                                      expected_device=['disk1', 'disk2'])

    def test_disk_write_requests_rate(self):
        # 700 + 800 across both devices.
        self._check_aggregate_samples(disk.WriteRequestsRatePollster,
                                      'disk.write.requests.rate', 1500L,
                                      expected_device=['disk1', 'disk2'])

    def test_per_disk_read_bytes_rate(self):
        self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster,
                                       'disk.device.read.bytes.rate',
                                       1024L, 'disk1')
        self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster,
                                       'disk.device.read.bytes.rate',
                                       2048L, 'disk2')

    def test_per_disk_read_requests_rate(self):
        self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster,
                                       'disk.device.read.requests.rate',
                                       300L, 'disk1')
        self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster,
                                       'disk.device.read.requests.rate',
                                       400L, 'disk2')

    def test_per_disk_write_bytes_rate(self):
        self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster,
                                       'disk.device.write.bytes.rate',
                                       5120L, 'disk1')
        self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster,
                                       'disk.device.write.bytes.rate',
                                       6144L, 'disk2')

    def test_per_disk_write_requests_rate(self):
        self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster,
                                       'disk.device.write.requests.rate',
                                       700L, 'disk1')
        self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster,
                                       'disk.device.write.requests.rate',
                                       800L, 'disk2')
class TestDiskPollsters(TestBaseDiskIO):
    """Tests for cumulative disk I/O pollsters, aggregate and per-device."""

    # Two fake devices with fixed cumulative stats.
    DISKS = [
        (virt_inspector.Disk(device=dev),
         virt_inspector.DiskStats(read_bytes=rb, read_requests=rr,
                                  write_bytes=wb, write_requests=wr,
                                  errors=-1))
        for dev, rb, rr, wb, wr in [('vda1', 1, 2, 3, 4),
                                    ('vda2', 2, 3, 5, 7)]
    ]
    CACHE_KEY = "CACHE_KEY_DISK"

    def setUp(self):
        """Stub the inspector to return the fixed DISKS fixture."""
        super(TestDiskPollsters, self).setUp()
        self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS)

    def test_disk_read_requests(self):
        # 2 + 3 across the two devices.
        self._check_aggregate_samples(disk.ReadRequestsPollster,
                                      'disk.read.requests', 5,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_read_bytes(self):
        # 1 + 2 across the two devices.
        self._check_aggregate_samples(disk.ReadBytesPollster,
                                      'disk.read.bytes', 3,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_write_requests(self):
        # 4 + 7 across the two devices.
        self._check_aggregate_samples(disk.WriteRequestsPollster,
                                      'disk.write.requests', 11,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_write_bytes(self):
        # 3 + 5 across the two devices.
        self._check_aggregate_samples(disk.WriteBytesPollster,
                                      'disk.write.bytes', 8,
                                      expected_device=['vda1', 'vda2'])

    def test_per_disk_read_requests(self):
        for dev, value in (('vda1', 2), ('vda2', 3)):
            self._check_per_device_samples(disk.PerDeviceReadRequestsPollster,
                                           'disk.device.read.requests',
                                           value, dev)

    def test_per_disk_write_requests(self):
        for dev, value in (('vda1', 4), ('vda2', 7)):
            self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster,
                                           'disk.device.write.requests',
                                           value, dev)

    def test_per_disk_read_bytes(self):
        for dev, value in (('vda1', 1), ('vda2', 2)):
            self._check_per_device_samples(disk.PerDeviceReadBytesPollster,
                                           'disk.device.read.bytes',
                                           value, dev)

    def test_per_disk_write_bytes(self):
        for dev, value in (('vda1', 3), ('vda2', 5)):
            self._check_per_device_samples(disk.PerDeviceWriteBytesPollster,
                                           'disk.device.write.bytes',
                                           value, dev)
class TestDiskRatePollsters(TestBaseDiskIO):
    """Tests for gauge disk-rate pollsters, aggregate and per-device."""

    # Two fake devices; DiskRateStats fields are positional:
    # (read_bytes_rate, read_requests_rate, write_bytes_rate,
    #  write_requests_rate).
    DISKS = [
        (virt_inspector.Disk(device=dev),
         virt_inspector.DiskRateStats(rb, rr, wb, wr))
        for dev, rb, rr, wb, wr in [('disk1', 1024, 300, 5120, 700),
                                    ('disk2', 2048, 400, 6144, 800)]
    ]
    TYPE = 'gauge'
    CACHE_KEY = "CACHE_KEY_DISK_RATE"

    def setUp(self):
        """Stub the inspector to return the fixed DISKS fixture."""
        super(TestDiskRatePollsters, self).setUp()
        self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS)

    def test_disk_read_bytes_rate(self):
        # 1024 + 2048 across both devices.
        self._check_aggregate_samples(disk.ReadBytesRatePollster,
                                      'disk.read.bytes.rate', 3072,
                                      expected_device=['disk1', 'disk2'])

    def test_disk_read_requests_rate(self):
        # 300 + 400 across both devices.
        self._check_aggregate_samples(disk.ReadRequestsRatePollster,
                                      'disk.read.requests.rate', 700,
                                      expected_device=['disk1', 'disk2'])

    def test_disk_write_bytes_rate(self):
        # 5120 + 6144 across both devices.
        self._check_aggregate_samples(disk.WriteBytesRatePollster,
                                      'disk.write.bytes.rate', 11264,
                                      expected_device=['disk1', 'disk2'])

    def test_disk_write_requests_rate(self):
        # 700 + 800 across both devices.
        self._check_aggregate_samples(disk.WriteRequestsRatePollster,
                                      'disk.write.requests.rate', 1500,
                                      expected_device=['disk1', 'disk2'])

    def test_per_disk_read_bytes_rate(self):
        for dev, value in (('disk1', 1024), ('disk2', 2048)):
            self._check_per_device_samples(
                disk.PerDeviceReadBytesRatePollster,
                'disk.device.read.bytes.rate', value, dev)

    def test_per_disk_read_requests_rate(self):
        for dev, value in (('disk1', 300), ('disk2', 400)):
            self._check_per_device_samples(
                disk.PerDeviceReadRequestsRatePollster,
                'disk.device.read.requests.rate', value, dev)

    def test_per_disk_write_bytes_rate(self):
        for dev, value in (('disk1', 5120), ('disk2', 6144)):
            self._check_per_device_samples(
                disk.PerDeviceWriteBytesRatePollster,
                'disk.device.write.bytes.rate', value, dev)

    def test_per_disk_write_requests_rate(self):
        for dev, value in (('disk1', 700), ('disk2', 800)):
            self._check_per_device_samples(
                disk.PerDeviceWriteRequestsRatePollster,
                'disk.device.write.requests.rate', value, dev)
def inspect_disk_iops(self, instance): """Inspect the Disk Input/Output operations per second for an instance. The response is a generator of the values. :param instance: the target instance :return disk: The Disk indicating the device for the storage device. :return stats: The DiskIOPSStats indicating the I/O operations per second for the device. """ # Get the current and previous sample. Delta is performed between # these two. uuid = self._puuid(instance) cur_date, cur_metric = self.vm_metrics.get_latest_metric(uuid) prev_date, prev_metric = self.vm_metrics.get_previous_metric(uuid) # If the cur_metric is none, then the instance can not be found in the # sample and an error should be raised. if cur_metric is None: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in PowerVM Metrics Sample.') % instance.name) # If there isn't storage information, this may be because the Virtual # I/O Metrics were turned off. If the previous metric is unavailable, # also have to pass through this method. if (cur_metric.storage is None or prev_metric is None or prev_metric.storage is None): LOG.debug("Current storage metric was unavailable from the API " "instance %s." % instance.name) return # Need to determine the time delta between the samples. This is # usually 30 seconds from the API, but the metrics will be specific. # However, if there is no previous sample, then we have to estimate. # Therefore, we estimate 15 seconds - half of the standard 30 seconds. 
date_delta = ((cur_date - prev_date) if prev_date is not None else datetime.timedelta(seconds=15)) # Bundle together the SCSI and virtual FC adapters cur_adpts = (cur_metric.storage.virt_adpts + cur_metric.storage.vfc_adpts) prev_adpts = (prev_metric.storage.virt_adpts + prev_metric.storage.vfc_adpts) def find_prev(cur_adpt): for prev_adpt in prev_adpts: if prev_adpt.name == cur_adpt.name: return prev_adpt return None # Loop through all the storage adapters for cur_adpt in cur_adpts: # IOPs is the read/write counts of the current - prev divided by # second difference between the two, rounded to the integer. :-) cur_ops = cur_adpt.num_reads + cur_adpt.num_writes # The previous adapter may be None. This simply indicates that the # adapter was added between the previous sample and this one. It # does not indicate a live migrate scenario like noted above, as # the VM itself hasn't moved. prev_adpt = find_prev(cur_adpt) prev_ops = ((prev_adpt.num_reads + prev_adpt.num_writes) if prev_adpt else 0) iops = (cur_ops - prev_ops) // date_delta.seconds # PowerVM only shows the connection (SCSI or FC). Name after # the connection name disk = virt_inspector.Disk(device=cur_adpt.name) stats = virt_inspector.DiskIOPSStats(iops_count=iops) yield (disk, stats)