Example #1
    def testSamplesWraparound(self):
        # Push one more sample than the window holds, so the oldest
        # sample has to be evicted (wraparound).
        NUM = sampling.HOST_STATS_AVERAGING_WINDOW + 1

        samples = sampling.SampleWindow(
            sampling.HOST_STATS_AVERAGING_WINDOW)

        class FakeHostSample(object):

            counter = 0

            def __repr__(self):
                return "FakeHostSample(id=%i)" % self.id

            def __init__(self, *args):
                self.id = FakeHostSample.counter
                FakeHostSample.counter += 1

        with MonkeyPatchScope([(sampling, 'HostSample', FakeHostSample)]):
            hs = sampling.HostMonitor(samples=samples)
            for _ in range(NUM):
                hs()

            # Only the newest HOST_STATS_AVERAGING_WINDOW samples survive.
            first, last, _ = samples.stats()
            self.assertEqual(first.id,
                             FakeHostSample.counter -
                             sampling.HOST_STATS_AVERAGING_WINDOW)
            self.assertEqual(last.id,
                             FakeHostSample.counter - 1)
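
The window exercised by this test behaves like a bounded ring buffer: once more than HOST_STATS_AVERAGING_WINDOW samples have been appended, the oldest ones are discarded, so stats() reports the first and last samples still retained. A minimal sketch of such a window, assuming an append()/stats() interface like the one the test relies on; this is an illustration only, not vdsm's actual SampleWindow:

import collections
import time


class SampleWindow(object):
    """Keep only the most recent `size` samples."""

    def __init__(self, size, timefn=time.monotonic):
        self._samples = collections.deque(maxlen=size)
        self._timefn = timefn

    def append(self, value):
        # deque(maxlen=...) silently drops the oldest entry when full,
        # which is exactly the wraparound behaviour the test checks for.
        self._samples.append((self._timefn(), value))

    def stats(self):
        if not self._samples:
            return None, None, None
        first_time, first_sample = self._samples[0]
        last_time, last_sample = self._samples[-1]
        return first_sample, last_sample, last_time - first_time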
Example #2
def start(cif, scheduler):
    global _operations
    global _executor

    _executor = executor.Executor(name="periodic",
                                  workers_count=_WORKERS,
                                  max_tasks=_TASKS,
                                  scheduler=scheduler,
                                  max_workers=_MAX_WORKERS)
    _executor.start()

    def per_vm_operation(func, period):
        disp = VmDispatcher(cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(disp, period, scheduler)

    _operations = [
        # Needs dispatching because updating the volume stats needs
        # access to the storage and thus can block.
        per_vm_operation(UpdateVolumes,
                         config.getint('irs', 'vol_size_sample_interval')),

        # Job monitoring needs QEMU monitor access.
        per_vm_operation(BlockjobMonitor,
                         config.getint('vars', 'vm_sample_jobs_interval')),

        # We do this only until we get high water mark notifications
        # from QEMU. It accesses storage and/or the QEMU monitor, so it can
        # block; thus we need dispatching.
        per_vm_operation(DriveWatermarkMonitor,
                         config.getint('vars', 'vm_watermark_interval')),
        Operation(lambda: recovery.lookup_external_vms(cif),
                  config.getint('sampling', 'external_vm_lookup_interval'),
                  scheduler,
                  exclusive=True,
                  discard=False),
        Operation(containersconnection.monitor,
                  config.getint('vars', 'vm_sample_interval'), scheduler),
    ]

    if config.getboolean('sampling', 'enable'):
        _operations.extend([
            # libvirt sampling using bulk stats can block, but unresponsive
            # domains are handled inside VMBulkstatsMonitor for performance
            # reasons; thus, it does not need dispatching.
            Operation(
                sampling.VMBulkstatsMonitor(libvirtconnection.get(cif),
                                            cif.getVMs, sampling.stats_cache),
                config.getint('vars', 'vm_sample_interval'), scheduler),
            Operation(sampling.HostMonitor(cif=cif),
                      config.getint('vars', 'host_sample_stats_interval'),
                      scheduler,
                      timeout=config.getint('vars',
                                            'host_sample_stats_interval'),
                      exclusive=True,
                      discard=False),
        ])
        host.stats.start()

    for op in _operations:
        op.start()
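
In these examples, per_vm_operation wraps a per-VM callable (UpdateVolumes, BlockjobMonitor, ...) in a VmDispatcher, so the potentially blocking work runs on the executor's worker threads rather than inside the scheduler. A rough sketch of what such a dispatcher does when the scheduler invokes it; the body below is only an illustration, written under the assumption that the executor exposes a dispatch(callable, timeout) method, not vdsm's actual implementation:

class VmDispatcher(object):
    """When called, dispatch one unit of work per tracked VM."""

    def __init__(self, get_vms, executor, create, timeout):
        self._get_vms = get_vms    # e.g. cif.getVMs
        self._executor = executor
        self._create = create      # e.g. UpdateVolumes
        self._timeout = timeout

    def __call__(self):
        for vm_id, vm in self._get_vms().items():
            try:
                # assumed executor API: dispatch(callable, timeout)
                self._executor.dispatch(self._create(vm), self._timeout)
            except Exception:
                # never let one busy or failed VM break the periodic loop
                continue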
Example #3
def _create(cif, scheduler):
    def per_vm_operation(func, period):
        disp = VmDispatcher(cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(disp, period, scheduler)

    ops = [
        # Needs dispatching because updating the volume stats needs
        # access to the storage and thus can block.
        per_vm_operation(UpdateVolumes,
                         config.getint('irs', 'vol_size_sample_interval')),

        # Job monitoring needs QEMU monitor access.
        per_vm_operation(BlockjobMonitor,
                         config.getint('vars', 'vm_sample_jobs_interval')),

        # We do this only until we get high water mark notifications
        # from QEMU. It accesses storage and/or the QEMU monitor, so it can
        # block; thus we need dispatching.
        per_vm_operation(DriveWatermarkMonitor,
                         config.getint('vars', 'vm_watermark_interval')),
        per_vm_operation(
            NvramDataMonitor,
            config.getint('sampling', 'nvram_data_update_interval')),
        per_vm_operation(TpmDataMonitor,
                         config.getint('sampling',
                                       'tpm_data_update_interval')),
        Operation(lambda: recovery.lookup_external_vms(cif),
                  config.getint('sampling', 'external_vm_lookup_interval'),
                  scheduler,
                  exclusive=True,
                  discard=False),
        Operation(lambda: _kill_long_paused_vms(cif),
                  config.getint('vars', 'vm_kill_paused_time') // 2,
                  scheduler,
                  exclusive=True,
                  discard=False),
    ]

    if config.getboolean('sampling', 'enable'):
        ops.extend([
            # libvirt sampling using bulk stats can block, but unresponsive
            # domains are handled inside VMBulkstatsMonitor for performance
            # reasons; thus, it does not need dispatching.
            Operation(
                sampling.VMBulkstatsMonitor(libvirtconnection.get(cif),
                                            cif.getVMs, sampling.stats_cache),
                config.getint('vars', 'vm_sample_interval'), scheduler),
            Operation(sampling.HostMonitor(cif=cif),
                      config.getint('vars', 'host_sample_stats_interval'),
                      scheduler,
                      timeout=config.getint('vars',
                                            'host_sample_stats_interval'),
                      exclusive=True,
                      discard=False),
        ])

    return ops
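
Unlike the previous example, _create only builds the operations; the caller is expected to start them afterwards, for instance in the same way Examples #2 and #4 start their own list:

ops = _create(cif, scheduler)
for op in ops:
    op.start()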
Example #4
def start(cif, scheduler):
    global _operations
    global _executor

    _executor = executor.Executor(name="periodic",
                                  workers_count=_WORKERS,
                                  max_tasks=_TASKS,
                                  scheduler=scheduler,
                                  max_workers=_MAX_WORKERS)
    _executor.start()

    def per_vm_operation(func, period):
        disp = VmDispatcher(cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(disp, period, scheduler)

    _operations = [
        # Needs dispatching because updating the volume stats needs
        # access to the storage and thus can block.
        per_vm_operation(UpdateVolumes,
                         config.getint('irs', 'vol_size_sample_interval')),

        # Needs dispatching because it accesses FS and libvirt data.
        # Ignored by the new engine; has to be kept for BC's sake.
        per_vm_operation(NumaInfoMonitor,
                         config.getint('vars', 'vm_sample_numa_interval')),

        # Job monitoring needs QEMU monitor access.
        per_vm_operation(BlockjobMonitor,
                         config.getint('vars', 'vm_sample_jobs_interval')),

        # libvirt sampling using bulk stats can block, but unresponsive
        # domains are handled inside VMBulkSampler for performance reasons;
        # thus, it does not need dispatching.
        Operation(
            sampling.VMBulkSampler(libvirtconnection.get(cif), cif.getVMs,
                                   sampling.stats_cache),
            config.getint('vars', 'vm_sample_interval'), scheduler),

        # We do this only until we get high water mark notifications
        # from QEMU. It accesses storage and/or the QEMU monitor, so it can
        # block; thus we need dispatching.
        per_vm_operation(DriveWatermarkMonitor,
                         config.getint('vars', 'vm_watermark_interval')),
        Operation(sampling.HostMonitor(cif=cif),
                  config.getint('vars', 'host_sample_stats_interval'),
                  scheduler),
        Operation(containersconnection.monitor,
                  config.getint('vars', 'vm_sample_interval'), scheduler),
    ]

    host.stats.start()

    for op in _operations:
        op.start()
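
All of the setup examples derive the dispatch timeout from the sampling period via _timeout_from. A plausible shape for that helper, purely illustrative (the actual ratio used by vdsm may differ):

def _timeout_from(interval):
    # Give dispatched per-VM work only part of the sampling period before
    # it is treated as stuck; the ratio here is an assumption.
    return interval / 2.0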