def test_with_vms(self):
    """getVMs() must report every VM registered with the ClientIF."""
    cif = fake.ClientIF()
    with fake.VM(params={'vmId': 'testvm1'}, cif=cif) as testvm1, \
            fake.VM(params={'vmId': 'testvm2'}, cif=cif) as testvm2:
        vms = cif.getVMs()
        self.assertEqual(len(vms), 2)
        self.assertIn(testvm1.id, vms)
        self.assertIn(testvm2.id, vms)
def test_without_any_vms(self):
    """recovery.all_vms() leaves the VM container empty when libvirt
    reports no domains."""
    with namedTemporaryDir() as tmpdir:
        patches = [
            (constants, 'P_VDSM_RUN', tmpdir + '/'),
            (recovery, '_list_domains', lambda: []),
        ]
        with MonkeyPatchScope(patches):
            fakecif = fake.ClientIF()
            recovery.all_vms(fakecif)
            self.assertEqual(fakecif.vmContainer, {})
def setUp(self):
    """Create a fake ClientIF and route libvirt/container lookups to fakes."""
    self.cif = fake.ClientIF()
    self.conn = FakeConnection()
    patches = [
        (containersconnection, 'recovery', lambda *args: []),
        (libvirtconnection, 'get', lambda *args, **kwargs: self.conn),
    ]
    self.patch = Patch(patches)
    self.patch.apply()
def test_load(self):
    """A saved recovery file can be loaded back and queues a VM request."""
    with self.setup_env() as (testvm, tmpdir):
        recovery.File(testvm.id).save(testvm)
        fakecif = fake.ClientIF()
        loaded = recovery.File(testvm.id)
        self.assertTrue(loaded.load(fakecif))
        self.assertVmStatus(testvm, fakecif.vmRequests[testvm.id][0])
def test_load_with_createVm_error(self, createVm):
    """When ClientIF.createVm fails, load() must report failure and leave
    both the VM container and the request map empty."""
    with self.setup_env() as (testvm, tmpdir):
        recovery.File(testvm.id).save(testvm)
        fakecif = fake.ClientIF()
        fakecif.createVm = createVm
        loaded = recovery.File(testvm.id)
        self.assertFalse(loaded.load(fakecif))
        self.assertEqual(fakecif.vmContainer, {})
        self.assertEqual(fakecif.vmRequests, {})
def setUp(self):
    """Expose two fake, non-external domains through a patched libvirt
    connection."""
    self.vm_uuids = ('a', 'b',)
    self.vm_is_ext = [False] * len(self.vm_uuids)
    self.cif = fake.ClientIF()
    self.conn = FakeConnection()
    self.patch = Patch([
        (libvirtconnection, 'get', lambda *args, **kwargs: self.conn),
    ])
    self.patch.apply()
    # must be after patch.apply()
    pairs = [(uuid, ext)
             for uuid, ext in zip(self.vm_uuids, self.vm_is_ext)]
    self.conn.domains = _make_domains_collection(pairs)
def test_start_with_invalid_operation(self):
    """
    periodic.start() should swallow any error that
    periodic.Operation.start() may raise, and keep starting the other
    operations after the failed one.
    """
    lock = threading.Lock()
    done = threading.Event()

    def _work():
        # Each successful run decrements the shared counter; when all
        # expected tasks ran, signal the main thread.
        with lock:
            self.tasks -= 1
            if not self.tasks:
                done.set()

    ops = [
        periodic.Operation(_work, period=1.0, scheduler=self.sched,
                           executor=self.exc),
        # will raise periodic.InvalidValue
        periodic.Operation(lambda: None, period=0, scheduler=self.sched,
                           executor=self.exc),
        periodic.Operation(_work, period=1.0, scheduler=self.sched,
                           executor=self.exc),
    ]
    with MonkeyPatchScope([
        (periodic, 'config',
         make_config([('sampling', 'enable', 'false')])),
        (periodic, '_create', lambda cif, sched: ops),
    ]):
        # Don't assume operations are started in order,
        # we just know all of them will be start()ed.
        # See the documentation of periodic.start()
        periodic.start(fake.ClientIF(), self.sched)
    done.wait(0.5)
    self.assertTrue(done.is_set())
def test_clean_vm_files(self):
    """clean_vm_files() must delete recovery files of VMs that are not
    present in the VM container."""
    with fake.VM() as testvm, namedTemporaryDir() as tmpdir:
        with MonkeyPatchScope([(constants, 'P_VDSM_RUN', tmpdir + '/')]):
            recovery.File(testvm.id).save(testvm)
            fakecif = fake.ClientIF()
            recovery.File(testvm.id).load(fakecif)
            # we have one recovery file (just created)
            self.assertEqual(len(os.listdir(tmpdir)), 1)
            # ...but somehow ClientIF failed to create the VM.
            self.assertEqual(fakecif.vmContainer, {})
            # ... so we can actually do our test.
            recovery.clean_vm_files(fakecif)
            self.assertEqual(os.listdir(tmpdir), [])
def test_compat41(self):
    """A 4.1-era domain XML is recovered into the expected VM
    configuration and device list."""
    expected_conf = json.loads(read_data('vm_compat41.json'))[0]
    domain_xml = read_data('vm_compat41.xml')
    vm_params = recovery._recovery_params(
        expected_conf['vmId'], domain_xml, False)
    vm_obj = vm.Vm(fake.ClientIF(), vm_params, recover=True)
    # TODO: ugly hack, but we don't have APIs to do that
    vm_obj._devices = vm_obj._make_devices()
    recovered_conf = vm_obj.status(fullStatus=True)
    self.assert_conf_equal(
        recovered_conf, expected_conf, filter_vm_conf_keys)
    self.assert_devices_conf_equal(
        recovered_conf['devices'], expected_conf['devices'],
        IGNORED_DEVICE_TYPES)
def setUp(self):
    """Load the hosted-engine lease XML and prepare fake drive/VM lease
    volume information."""
    self.vm = FakeVM(self.log)
    self.cif = fake.ClientIF()
    self.xml_str = read_data('hostedengine_lease.xml')
    self.dom = xmlutils.fromstring(self.xml_str)
    self.disk_devs = domxml_preprocess._make_disk_devices(
        self.xml_str, self.log)
    self.driveVolInfo = {
        'leasePath': '/fake/drive/lease/path',
        'leaseOffset': 42,
    }
    # Both placeholders share the same sd-id:vol-id suffix.
    lease_id = ('9eaa286e-37d6-429e-a46b-63bec1dd4868:'
                '4f0a775f-ed16-4832-ab9f-f0427f33ab92')
    self.vmVolInfo = {
        # from XML
        'leasePath': 'LEASE-PATH:' + lease_id,
        'leaseOffset': 'LEASE-OFFSET:' + lease_id,
    }
def setUp(self):
    """Start a scheduler-backed QEMU guest agent poller and seed the
    capabilities of one fake VM."""
    self.cif = fake.ClientIF()
    self.scheduler = schedule.Scheduler(name="test.Scheduler",
                                        clock=monotonic_time)
    self.scheduler.start()
    self.log = logging.getLogger("test")
    self.qga_poller = qemuguestagent.QemuGuestAgentPoller(
        self.cif, self.log, self.scheduler)
    self.vm = FakeVM()
    supported_commands = [
        qemuguestagent._QEMU_ACTIVE_USERS_COMMAND,
        qemuguestagent._QEMU_GUEST_INFO_COMMAND,
        qemuguestagent._QEMU_HOST_NAME_COMMAND,
        qemuguestagent._QEMU_NETWORK_INTERFACES_COMMAND,
        qemuguestagent._QEMU_OSINFO_COMMAND,
        qemuguestagent._QEMU_TIMEZONE_COMMAND,
    ]
    self.qga_poller.update_caps(
        self.vm.id,
        {'version': '0.0-test', 'commands': supported_commands})
def setUp(self):
    """Give each test a pristine fake ClientIF instance."""
    self.cif = fake.ClientIF()
def test_empty(self):
    """A freshly created ClientIF exposes no VMs."""
    self.assertFalse(fake.ClientIF().getVMs())
def setUp(self):
    """Create a fake ClientIF populated with fake VMs and reset the
    shared visitor registry."""
    self.cif = fake.ClientIF()
    self._make_fake_vms()
    # Clear leftovers from previous tests; _Visitor.VMS is class-level
    # shared state.
    _Visitor.VMS.clear()
def setUp(self):
    """Wire a fake JSON-RPC server into the fake ClientIF's bindings."""
    self.cif = fake.ClientIF()
    self.serv = fake.JsonRpcServer()
    self.cif.bindings["jsonrpc"] = self.serv