def test_monitoring_needed():
    class FakeDrive:
        def __init__(self, flag):
            self.flag = flag

        def needs_monitoring(self):
            return self.flag

    vm = FakeVM()
    mon = thinp.VolumeMonitor(vm, vm.log)
    assert not mon.monitoring_needed()

    vm.drives.append(FakeDrive(False))
    assert not mon.monitoring_needed()

    vm.drives.append(FakeDrive(True))
    assert mon.monitoring_needed()

    vm.drives.append(FakeDrive(False))
    assert mon.monitoring_needed()

    mon.disable()
    assert not mon.monitoring_needed()

    mon.enable()
    assert mon.monitoring_needed()

    vm.drives[1].flag = False
    assert not mon.monitoring_needed()
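# A minimal sketch of the FakeVM these monitoring tests assume: the monitor
# needs only a logger and access to the VM's disk devices. The
# getDiskDevices() method is an assumption based on how vdsm's Vm exposes
# its drives; the real test helper may differ.

import logging


class FakeVM:

    def __init__(self):
        self.log = logging.getLogger("test")
        # Drives appended here are what monitoring_needed() inspects.
        self.drives = []

    def getDiskDevices(self):
        return self.drives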
def __init__(self, drive_infos):
    self._dom = FakeDomain()
    self.cif = FakeClientIF(FakeIRS())
    self.id = 'volume_monitor_vm'

    # Simplify testing by dispatching on the calling thread.
    self.volume_monitor = thinp.VolumeMonitor(
        self, self.log, dispatch=lambda func, **kw: func())

    self.block_stats = {}
    disks = []
    for drive_conf, block_info in drive_infos:
        drive = make_drive(self.log, drive_conf, block_info)
        self.cif.irs.set_drive_size(drive, block_info['physical'])
        self._dom.add_drive(drive, block_info)
        disks.append(drive)
        self.block_stats[block_info["backingIndex"]] = block_info
    self._devices = {hwclass.DISK: disks}

    # Needed for pause()/cont().
    self._lastStatus = vmstatus.UP
    self._guestCpuRunning = True
    self._custom = {}
    self._confLock = threading.Lock()
    self.conf = {}
    self._guestCpuLock = TimedAcquireLock(self.id)
    self._resume_behavior = 'auto_resume'
    self._pause_time = None
def __init__(self, config):
    self._dom = FakeDomain(config)
    self.log = logging.getLogger()
    self.cif = fake.ClientIF()
    self._domain = DomainDescriptor(config.xmls["00-before.xml"])
    self.id = self._domain.id
    self._md_desc = metadata.Descriptor.from_xml(
        config.xmls["00-before.xml"])

    drive = config.values["drive"]
    self._devices = {
        "disk": [
            storage.Drive(
                **drive,
                volumeChain=xml_chain(config.xmls["00-before.xml"]),
                log=self.log)
        ]
    }

    # Add the drives to the IRS:
    self.cif.irs.prepared_volumes = {
        (drive["domainID"], drive["imageID"], vol_id): vol_info
        for vol_id, vol_info in config.values["volumes"].items()
    }

    self.conf = self._conf_devices(config)
    self.conf["vmId"] = config.values["vm-id"]
    self.conf["xml"] = config.xmls["00-before.xml"]
    self._external = False  # Used when syncing metadata.
    self.volume_monitor = thinp.VolumeMonitor(self, self.log)
    self._confLock = threading.Lock()
    self._drive_merger = DriveMerger(self)
    self._migrationSourceThread = migration.SourceThread(self)
def test_clear_threshold():
    vm = FakeVM()
    mon = thinp.VolumeMonitor(vm, vm.log)
    # One drive (virtio, index 0).
    vda = make_drive(vm.log, index=0, iface='virtio')

    # Clear the threshold on index 1 in the backing chain of the drive.
    mon.clear_threshold(vda, 1)
    assert vm._dom.thresholds == [('vda[1]', 0)]
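# A minimal sketch of the FakeDomain backing vm._dom in the threshold tests,
# assuming VolumeMonitor calls setBlockThreshold(target, value) on the
# domain, mirroring libvirt's virDomain.setBlockThreshold(); the real test
# helper may record more state.


class FakeDomain:

    def __init__(self):
        self.thresholds = []

    def setBlockThreshold(self, target, threshold):
        # Record (target, value) pairs so the tests can assert on them,
        # e.g. ('vda[1]', 0) after clear_threshold(vda, 1).
        self.thresholds.append((target, threshold))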
def test_on_block_threshold_unknown_drive():
    vm = FakeVM()
    dispatch = FakeDispatch()
    mon = thinp.VolumeMonitor(vm, vm.log, dispatch=dispatch)
    vda = make_drive(vm.log, index=0, iface='virtio')
    vm.drives.append(vda)

    mon.on_block_threshold("vdb", "/unknown/path", 512 * MiB, 10 * MiB)

    assert vda.threshold_state == BLOCK_THRESHOLD.UNSET
    assert len(dispatch.calls) == 0
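# A minimal sketch of FakeDispatch, assuming the monitor hands it a
# functools.partial plus keyword arguments: the tests unpack each recorded
# call as (part, args) and inspect part.func and part.args. Instead of
# running the callable on a worker thread, it only records the call.


class FakeDispatch:

    def __init__(self):
        self.calls = []

    def __call__(self, func, **kwargs):
        self.calls.append((func, kwargs))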
def __init__(self, dom=None):
    self._dom = dom
    self.id = str(uuid.uuid4())
    self.log = logging.getLogger('test.migration.FakeVM')
    self.conf = {}
    self._mem_size_mb = 128
    self.hasSpice = True
    self.post_copy = migration.PostCopyPhase.NONE
    self.stopped_migrated_event_processed = threading.Event()
    self.stopped_migrated_event_processed.set()
    self.guestAgent = FakeGuestAgent()
    self.hibernation_attempts = 0
    self.volume_monitor = thinp.VolumeMonitor(
        self, self.log, enabled=True)
def test_set_threshold():
    vm = FakeVM()
    mon = thinp.VolumeMonitor(vm, vm.log)
    vda = make_drive(vm.log, index=0, iface='virtio')
    vm.drives.append(vda)

    apparentsize = 4 * GiB
    chunk_size = config.getint("irs", "volume_utilization_chunk_mb") * MiB
    free = (100 - config.getint("irs", "volume_utilization_percent")) / 100
    threshold = chunk_size * free

    # TODO: Use public API.
    mon._set_threshold(vda, apparentsize, 1)
    expected = apparentsize - threshold
    assert vm._dom.thresholds == [('vda[1]', expected)]
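# For example, assuming the stock defaults (volume_utilization_chunk_mb=1024,
# volume_utilization_percent=50): threshold = 1 GiB * 0.5 = 512 MiB, so the
# monitor should register a threshold at 4 GiB - 512 MiB = 3.5 GiB.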
def test_on_enospc():
    vm = FakeVM()
    dispatch = FakeDispatch()
    mon = thinp.VolumeMonitor(vm, vm.log, dispatch=dispatch)
    vda = make_drive(vm.log, index=0, iface='virtio')
    vm.drives.append(vda)

    mon.on_enospc(vda)

    assert vda.threshold_state == BLOCK_THRESHOLD.EXCEEDED
    assert len(dispatch.calls) == 1
    part, args = dispatch.calls[0]
    assert part.func == mon._extend_drive
    assert part.args == (vda,)
    assert args == dict(timeout=EXTEND_TIMEOUT, discard=True)
def test_set_threshold_drive_too_small():
    # We have seen the storage subsystem create drives smaller than the
    # minimum supported size of 1 GiB. While this is a storage issue, the
    # volume monitor must never set a negative threshold for such drives.
    vm = FakeVM()
    mon = thinp.VolumeMonitor(vm, vm.log)
    vda = make_drive(vm.log, index=0, iface='virtio')
    vm.drives.append(vda)

    apparentsize = 128 * MiB

    # TODO: Use public API.
    mon._set_threshold(vda, apparentsize, 3)
    target, value = vm._dom.thresholds[0]
    assert target == 'vda[3]'
    assert value >= 1
def __init__(self):
    self.log = logging.getLogger()
    self.volume_monitor = thinp.VolumeMonitor(self, self.log)
def test_disable_runtime():
    vm = FakeVM()
    mon = thinp.VolumeMonitor(vm, vm.log, enabled=True)

    mon.disable()

    assert mon.enabled() is False
def test_enable_on_create(enabled):
    vm = FakeVM()
    mon = thinp.VolumeMonitor(vm, vm.log, enabled=enabled)
    assert mon.enabled() == enabled