def test_instance_fault_get_by_instance(self):
    """Ensure instance faults can be retrieved by instance UUID."""
    ctxt = context.get_admin_context()
    created = [db.instance_create(ctxt, {}) for _ in range(2)]
    uuids = [inst["uuid"] for inst in created]

    # Create two faults per instance, varying only the error code.
    # Creation order matters: the query returns newest faults first.
    faults = {}
    for uuid in uuids:
        for code in (404, 500):
            values = {"message": "message",
                      "details": "detail",
                      "instance_uuid": uuid,
                      "code": code}
            faults[(uuid, code)] = db.instance_fault_create(ctxt, values)

    instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)

    expected = {uuids[0]: [faults[(uuids[0], 500)],
                           faults[(uuids[0], 404)]],
                uuids[1]: [faults[(uuids[1], 500)],
                           faults[(uuids[1], 404)]]}
    self.assertEqual(instance_faults, expected)
def test_describe_instances(self):
    """Makes sure describe_instances works and filters results."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'host': 'host1'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'host': 'host2'})
    comp1 = db.service_create(self.context,
                              {'host': 'host1',
                               'availability_zone': 'zone1',
                               'topic': "compute"})
    comp2 = db.service_create(self.context,
                              {'host': 'host2',
                               'availability_zone': 'zone2',
                               'topic': "compute"})

    # With no filter both instances show up in a single reservation.
    reservation = self.cloud.describe_instances(
        self.context)['reservationSet'][0]
    self.assertEqual(len(reservation['instancesSet']), 2)

    # Filtering by instance id narrows the result to that instance.
    instance_id = cloud.id_to_ec2_id(inst2['id'])
    reservation = self.cloud.describe_instances(
        self.context, instance_id=[instance_id])['reservationSet'][0]
    self.assertEqual(len(reservation['instancesSet']), 1)
    described = reservation['instancesSet'][0]
    self.assertEqual(described['instanceId'], instance_id)
    self.assertEqual(described['placement']['availabilityZone'],
                     'zone2')

    db.instance_destroy(self.context, inst1['id'])
    db.instance_destroy(self.context, inst2['id'])
    db.service_destroy(self.context, comp1['id'])
    db.service_destroy(self.context, comp2['id'])
def test_create(self):
    """Creating an Instance object calls db.instance_create and
    populates the object from the returned DB record."""
    self.mox.StubOutWithMock(db, 'instance_create')
    # With no fields set, an empty updates dict reaches the DB layer.
    db.instance_create(self.context, {}).AndReturn(self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance()
    inst.create(self.context)
    # The object picks up the id assigned by the (mocked) database.
    self.assertEqual(self.fake_instance['id'], inst.id)
def test_instance_get_all_by_filters_unicode_value(self):
    """A unicode display_name should match a plain-text filter."""
    db.instance_create(self.context,
                       {'reservation_id': 'a',
                        'image_ref': 1,
                        'host': 'host1',
                        'display_name': u'test♥'})
    matches = db.instance_get_all_by_filters(self.context.elevated(),
                                             {'display_name': u'test'})
    self.assertEqual(1, len(matches))
def test_create_stubbed(self):
    """Creating an Instance with fields set passes exactly those
    values through to db.instance_create."""
    self.mox.StubOutWithMock(db, "instance_create")
    vals = {"host": "foo-host",
            "memory_mb": 128,
            "system_metadata": {"foo": "bar"}}
    fake_inst = fake_instance.fake_db_instance(**vals)
    # The DB layer must receive the same dict the object was built from.
    db.instance_create(self.context, vals).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = instance.Instance(host="foo-host", memory_mb=128,
                             system_metadata={"foo": "bar"})
    inst.create(self.context)
def test_instance_faults_get_by_instance_uuids_no_faults(self):
    """None should be returned when no faults exist"""
    ctxt = context.get_admin_context()
    uuids = [db.instance_create(ctxt, {})['uuid'] for _ in range(2)]
    faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
    # Every instance maps to an empty fault list.
    expected = dict((uuid, []) for uuid in uuids)
    self.assertEqual(expected, faults)
def _setUpBlockDeviceMapping(self):
    """Create two instances and a full set of block device mappings
    on the first one, covering snapshot, volume, swap, ephemeral and
    no-device entries.

    Returns a tuple of (inst1, inst2, volumes) where volumes are the
    volumes created for the mappings.
    """
    image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
    # Flavor details are stored in system_metadata on the instance.
    sys_meta = flavors.save_flavor_info(
        {}, flavors.get_flavor(1))
    inst1 = db.instance_create(self.context,
                               {'image_ref': image_uuid,
                                'instance_type_id': 1,
                                'root_device_name': '/dev/sdb1',
                                'system_metadata': sys_meta})
    inst2 = db.instance_create(self.context,
                               {'image_ref': image_uuid,
                                'instance_type_id': 1,
                                'root_device_name': '/dev/sdc1',
                                'system_metadata': sys_meta})
    instance_uuid = inst1['uuid']
    # All mappings below belong to the first instance only.
    mappings0 = [
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb1',
         'snapshot_id': '1',
         'volume_id': '2'},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb2',
         'volume_id': '3',
         'volume_size': 1},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb3',
         'delete_on_termination': True,
         'snapshot_id': '4',
         'volume_id': '5'},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb4',
         'delete_on_termination': False,
         'snapshot_id': '6',
         'volume_id': '7'},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb5',
         'snapshot_id': '8',
         'volume_id': '9',
         'volume_size': 0},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb6',
         'snapshot_id': '10',
         'volume_id': '11',
         'volume_size': 1},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb7',
         'no_device': True},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb8',
         'virtual_name': 'swap'},
        {'instance_uuid': instance_uuid,
         'device_name': '/dev/sdb9',
         'virtual_name': 'ephemeral3'}]
    volumes = self._block_device_mapping_create(instance_uuid, mappings0)
    return (inst1, inst2, volumes)
def test_instance_get_all_by_filters_deleted(self):
    """A destroyed instance is excluded from unfiltered results."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_ref': 1,
                                              'host': 'host1'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'b',
                                              'image_ref': 1,
                                              'host': 'host1'})
    db.instance_destroy(self.context, inst1.id)

    survivors = db.instance_get_all_by_filters(self.context.elevated(),
                                               {})
    self.assertEqual(1, len(survivors))
    self.assertEqual(survivors[0].id, inst2.id)
def test_instance_get_all_by_filters_deleted(self):
    """Deleted instances are returned after live ones, with the
    deleted flag set."""
    specs = [{"reservation_id": "a", "image_ref": 1, "host": "host1"},
             {"reservation_id": "b", "image_ref": 1, "host": "host1"}]
    inst1, inst2 = [db.instance_create(self.context, spec)
                    for spec in specs]
    db.instance_destroy(self.context, inst1.id)

    instances = db.instance_get_all_by_filters(self.context.elevated(),
                                               {})
    self.assertEqual(2, len(instances))
    # The live instance sorts first, the deleted one last.
    self.assertEqual(instances[0].id, inst2.id)
    self.assertEqual(instances[1].id, inst1.id)
    self.assertTrue(instances[1].deleted)
def test_create_stubbed(self):
    """Creating an Instance with fields set passes exactly those
    values through to db.instance_create."""
    self.mox.StubOutWithMock(db, 'instance_create')
    vals = {'host': 'foo-host',
            'memory_mb': 128,
            'system_metadata': {'foo': 'bar'}}
    fake_inst = fake_instance.fake_db_instance(**vals)
    # The DB layer must receive the same dict the object was built from.
    db.instance_create(self.context, vals).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = instance.Instance(host='foo-host', memory_mb=128,
                             system_metadata={'foo': 'bar'})
    inst.create(self.context)
def test_instance_get_all_by_filters_deleted(self):
    """Unfiltered queries include deleted instances, flagged as such."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_ref': 1,
                                              'host': 'host1'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'b',
                                              'image_ref': 1,
                                              'host': 'host1'})
    db.instance_destroy(self.context.elevated(), inst1['uuid'])

    result = db.instance_get_all_by_filters(self.context.elevated(), {})
    self.assertEqual(2, len(result))
    found = dict((inst.id, inst) for inst in result)
    self.assertIn(inst1.id, found)
    self.assertIn(inst2.id, found)
    # Only the destroyed instance carries the deleted flag.
    self.assertTrue(found[inst1.id].deleted)
def test_instance_fault_get_by_instance(self):
    """
    ensure we can retrieve an instance fault by instance UUID
    """
    ctxt = context.get_admin_context()
    uuids = [db.instance_create(ctxt, {})['uuid'],
             db.instance_create(ctxt, {})['uuid']]

    def _make_fault(uuid, code):
        # Creation order matters: newest faults are returned first.
        return db.instance_fault_create(ctxt, {'message': 'message',
                                               'details': 'detail',
                                               'instance_uuid': uuid,
                                               'code': code})

    fault1 = _make_fault(uuids[0], 404)
    fault2 = _make_fault(uuids[0], 500)
    fault3 = _make_fault(uuids[1], 404)
    fault4 = _make_fault(uuids[1], 500)

    instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)

    expected = {
        uuids[0]: [fault2, fault1],
        uuids[1]: [fault4, fault3],
    }
    self.assertEqual(instance_faults, expected)
def _setUpBlockDeviceMapping(self):
    """Create two instances and a full set of block device mappings
    on the first one, covering snapshot, volume, swap, ephemeral and
    no-device entries.

    Returns a tuple of (inst1, inst2, volumes).
    """
    image_uuid = "cedef40a-ed67-4d10-800e-17455edce175"
    inst1 = db.instance_create(
        self.context,
        {"image_ref": image_uuid,
         "instance_type_id": 1,
         "root_device_name": "/dev/sdb1"}
    )
    inst2 = db.instance_create(
        self.context,
        {"image_ref": image_uuid,
         "instance_type_id": 1,
         "root_device_name": "/dev/sdc1"}
    )
    instance_uuid = inst1["uuid"]
    # All mappings below belong to the first instance only.
    mappings0 = [
        {"instance_uuid": instance_uuid,
         "device_name": "/dev/sdb1",
         "snapshot_id": "1",
         "volume_id": "2"},
        {"instance_uuid": instance_uuid,
         "device_name": "/dev/sdb2",
         "volume_id": "3",
         "volume_size": 1},
        {
            "instance_uuid": instance_uuid,
            "device_name": "/dev/sdb3",
            "delete_on_termination": True,
            "snapshot_id": "4",
            "volume_id": "5",
        },
        {
            "instance_uuid": instance_uuid,
            "device_name": "/dev/sdb4",
            "delete_on_termination": False,
            "snapshot_id": "6",
            "volume_id": "7",
        },
        {
            "instance_uuid": instance_uuid,
            "device_name": "/dev/sdb5",
            "snapshot_id": "8",
            "volume_id": "9",
            "volume_size": 0,
        },
        {
            "instance_uuid": instance_uuid,
            "device_name": "/dev/sdb6",
            "snapshot_id": "10",
            "volume_id": "11",
            "volume_size": 1,
        },
        {"instance_uuid": instance_uuid,
         "device_name": "/dev/sdb7",
         "no_device": True},
        {"instance_uuid": instance_uuid,
         "device_name": "/dev/sdb8",
         "virtual_name": "swap"},
        {"instance_uuid": instance_uuid,
         "device_name": "/dev/sdb9",
         "virtual_name": "ephemeral3"},
    ]
    volumes = self._block_device_mapping_create(instance_uuid, mappings0)
    return (inst1, inst2, volumes)
def test_get_disk_mapping_blockdev_eph(self):
    # A disk mapping with a blockdev replacing the ephemeral device
    user_context = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(user_context, self.test_instance)

    bdm = {'connection_info': "fake",
           'mount_device': "/dev/vdb",
           'boot_index': -1,
           'delete_on_termination': True}
    block_device_info = {'block_device_mapping': [bdm]}

    mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                         "virtio", "ide",
                                         block_device_info)

    expect = {
        'disk': {'bus': 'virtio', 'dev': 'vda',
                 'type': 'disk', 'boot_index': '1'},
        '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
        'root': {'bus': 'virtio', 'dev': 'vda',
                 'type': 'disk', 'boot_index': '1'},
    }
    self.assertEqual(mapping, expect)
def test_finish_migrate_no_resize_vdi(self):
    """finish_migration with resize_instance=False must not resize the
    VDI; VDI_resize_online raising proves it is never called."""
    instance = db.instance_create(self.context, self.values)

    def fake_vdi_resize(*args, **kwargs):
        # Fails the test if finish_migration attempts a resize.
        raise Exception("This shouldn't be called")

    self.stubs.Set(stubs.FakeSessionForMigrationTests,
                   "VDI_resize_online", fake_vdi_resize)
    stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
    stubs.stubout_loopingcall_start(self.stubs)
    conn = xenapi_conn.get_connection(False)
    # Legacy (network, mapping) tuple form of network info.
    network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
                     {'broadcast': '192.168.0.255',
                      'dns': ['192.168.0.1'],
                      'gateway': '192.168.0.1',
                      'gateway6': 'dead:beef::1',
                      'ip6s': [{'enabled': '1',
                                'ip': 'dead:beef::dcad:beff:feef:0',
                                'netmask': '64'}],
                      'ips': [{'enabled': '1',
                               'ip': '192.168.0.100',
                               'netmask': '255.255.255.0'}],
                      'label': 'fake',
                      'mac': 'DE:AD:BE:EF:00:00',
                      'rxtx_cap': 3})]
    # Resize instance would be determined by the compute call
    conn.finish_migration(instance, dict(base_copy='hurr', cow='durr'),
                          network_info, resize_instance=False)
def test_prep_resize_post_populates_retry(self):
    """Prep resize should add a 'host' entry to the retry dict"""
    sched = fakes.FakeFilterScheduler()
    image = 'image'
    instance = db.instance_create(self.context, {})
    instance_properties = {'project_id': 'fake', 'os_type': 'Linux'}
    instance_type = instance_types.get_instance_type_by_name("m1.tiny")
    request_spec = {'instance_properties': instance_properties,
                    'instance_type': instance_type}
    # Start with an empty retry host list; scheduling should append.
    retry = {'hosts': [], 'num_attempts': 1}
    filter_properties = {'retry': retry}
    reservations = None
    host = fakes.FakeHostState('host', 'node', {})
    weighted_host = least_cost.WeightedHost(1, host)
    hosts = [weighted_host]
    # _schedule returns our single weighted host; prep_resize is then
    # expected to be invoked with that host.
    self.mox.StubOutWithMock(sched, '_schedule')
    self.mox.StubOutWithMock(sched.compute_rpcapi, 'prep_resize')
    sched._schedule(self.context, request_spec, filter_properties,
                    [instance['uuid']]).AndReturn(hosts)
    sched.compute_rpcapi.prep_resize(self.context, image, instance,
                                     instance_type, 'host', reservations,
                                     request_spec=request_spec,
                                     filter_properties=filter_properties)
    self.mox.ReplayAll()
    sched.schedule_prep_resize(self.context, image, request_spec,
                               filter_properties, instance,
                               instance_type, reservations)
    # The chosen host must have been recorded for retry purposes.
    self.assertEqual(['host'], filter_properties['retry']['hosts'])
def test_destroy_with_not_equal_constraint_met(self):
    """Destroy succeeds when task_state differs from all excluded
    values named in the not_equal constraint."""
    admin_ctxt = context.get_admin_context()
    instance = db.instance_create(admin_ctxt,
                                  {'task_state': 'deleting'})

    constraint = db.constraint(
        task_state=db.not_equal('error', 'resize'))
    db.instance_destroy(admin_ctxt, instance['uuid'], constraint)

    # The instance should now be gone.
    self.assertRaises(exception.InstanceNotFound,
                      db.instance_get_by_uuid,
                      admin_ctxt,
                      instance['uuid'])
def _test_create_image(self, cache_type):
    """Assert that _create_image takes the expected code path for the
    given cache_images flag value ('some' -> cached image, 'none' ->
    direct fetch)."""
    sys_meta = {'image_cache_in_nova': True}
    instance = db.instance_create(self.context,
                                  {'system_metadata': sys_meta})
    # Re-read the record — presumably to get the fully joined DB form
    # of system_metadata; confirm against db API behavior.
    instance = db.instance_get_by_uuid(self.context, instance['uuid'])
    self.flags(cache_images=cache_type)
    # Records which stubbed path actually ran.
    was = {'called': None}

    def fake_create_cached_image(*args):
        was['called'] = 'some'
        return {}
    self.stubs.Set(vm_utils, '_create_cached_image',
                   fake_create_cached_image)

    def fake_fetch_image(*args):
        was['called'] = 'none'
        return {}
    self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

    vm_utils._create_image(self.context, None, instance,
                           'foo', 'bar', 'baz')
    # The stub that ran must match the configured cache type.
    self.assertEqual(was['called'], cache_type)
def test_network_get_associated_fixed_ips(self):
    """Associated fixed IPs carry instance and VIF details."""
    ctxt = context.get_admin_context()
    instance = db.instance_create(ctxt, {'host': 'foo',
                                         'hostname': 'myname'})
    vif = db.virtual_interface_create(ctxt,
                                      {'address': 'bar',
                                       'instance_id': instance['id']})
    fixed_address = db.fixed_ip_create(
        ctxt,
        {'address': 'baz',
         'network_id': 1,
         'allocated': True,
         'instance_id': instance['id'],
         'virtual_interface_id': vif['id']})

    data = db.network_get_associated_fixed_ips(ctxt, 1)
    self.assertEqual(len(data), 1)
    record = data[0]
    expected = {'address': fixed_address,
                'instance_id': instance['id'],
                'network_id': 1,
                'instance_created': instance['created_at'],
                'instance_updated': instance['updated_at'],
                'instance_hostname': instance['hostname'],
                'vif_id': vif['id'],
                'vif_address': vif['address']}
    for key, value in expected.items():
        self.assertEqual(record[key], value)

    # A host filter that matches nothing returns no records.
    data = db.network_get_associated_fixed_ips(ctxt, 1, 'nothing')
    self.assertEqual(len(data), 0)
def _timeout_test(self, ctxt, timeout, multi_host):
    """Populate fixed IPs exercising each branch of the deallocation
    timeout query.

    Creates one instance and one network, then four fixed IPs: one
    eligible for deallocation, one still allocated, one on no network,
    and one updated too recently.

    :param ctxt: request context used for all DB calls.
    :param timeout: datetime cutoff for "stale" fixed IPs.
    :param multi_host: value for the network's multi_host flag.
    """
    values = {'host': 'foo'}
    instance = db.instance_create(ctxt, values)
    values = {'multi_host': multi_host, 'host': 'bar'}
    net = db.network_create_safe(ctxt, values)
    # Bug fix: the original chained assignments ('old = time = ...',
    # 'new = time = ...') pointlessly bound an unused name 'time',
    # shadowing any imported time module in this scope. The values
    # are unchanged.
    old = timeout - datetime.timedelta(seconds=5)
    new = timeout + datetime.timedelta(seconds=5)
    # should deallocate
    values = {'allocated': False,
              'instance_id': instance['id'],
              'network_id': net['id'],
              'updated_at': old}
    db.fixed_ip_create(ctxt, values)
    # still allocated
    values = {'allocated': True,
              'instance_id': instance['id'],
              'network_id': net['id'],
              'updated_at': old}
    db.fixed_ip_create(ctxt, values)
    # wrong network
    values = {'allocated': False,
              'instance_id': instance['id'],
              'network_id': None,
              'updated_at': old}
    db.fixed_ip_create(ctxt, values)
    # too new
    values = {'allocated': False,
              'instance_id': instance['id'],
              'network_id': None,
              'updated_at': new}
    db.fixed_ip_create(ctxt, values)
def _create_instance(self, instance_id=1, spawn=True):
    """Creates and spawns a test instance.

    :param instance_id: explicit DB id for the instance record.
    :param spawn: when True, also spawn the instance on self.conn.
    :returns: the created instance record.
    """
    stubs.stubout_loopingcall_start(self.stubs)
    values = {
        'id': instance_id,
        'project_id': self.project_id,
        'user_id': self.user_id,
        'image_ref': 1,
        'kernel_id': 2,
        'ramdisk_id': 3,
        'instance_type_id': '3',  # m1.large
        'os_type': 'linux',
        'architecture': 'x86-64'}
    instance = db.instance_create(self.context, values)
    # Static single-network info in the legacy (network, mapping)
    # tuple format.
    network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
                     {'broadcast': '192.168.0.255',
                      'dns': ['192.168.0.1'],
                      'gateway': '192.168.0.1',
                      'gateway6': 'dead:beef::1',
                      'ip6s': [{'enabled': '1',
                                'ip': 'dead:beef::dcad:beff:feef:0',
                                'netmask': '64'}],
                      'ips': [{'enabled': '1',
                               'ip': '192.168.0.100',
                               'netmask': '255.255.255.0'}],
                      'label': 'fake',
                      'mac': 'DE:AD:BE:EF:00:00',
                      'rxtx_cap': 3})]
    if spawn:
        self.conn.spawn(instance, network_info)
    return instance
def _create_instance(self):
    """Create and return a minimal test instance record."""
    values = {'user_id': 'fake',
              'project_id': 'fake',
              'instance_type_id': 1,
              'memory_mb': 1024,
              'vcpus': 2}
    return db.instance_create(context.get_admin_context(), values)
def create(self, context):
    """Create the instance in the database.

    Serializes complex fields (security groups, info cache, NUMA
    topology, PCI requests and flavor) into the forms the DB layer
    expects, then repopulates this object from the created record.

    :param context: the request context for the DB call.
    :raises: ObjectActionError if the instance was already created.
    """
    if self.obj_attr_is_set("id"):
        raise exception.ObjectActionError(action="create",
                                          reason="already created")
    updates = self.obj_get_changes()
    # Only default fields present in the updates need loading back.
    expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
                      if attr in updates]
    if "security_groups" in updates:
        # The DB API takes security group names, not objects.
        updates["security_groups"] = [x.name for x in
                                      updates["security_groups"]]
    if "info_cache" in updates:
        updates["info_cache"] = {
            "network_info": updates["info_cache"].network_info.json()}
    # Complex sub-objects are serialized under the 'extra' key —
    # presumably persisted in a separate instance-extra store; confirm
    # against the db layer.
    updates["extra"] = {}
    numa_topology = updates.pop("numa_topology", None)
    if numa_topology:
        expected_attrs.append("numa_topology")
        updates["extra"]["numa_topology"] = numa_topology._to_json()
    pci_requests = updates.pop("pci_requests", None)
    if pci_requests:
        expected_attrs.append("pci_requests")
        updates["extra"]["pci_requests"] = pci_requests.to_json()
    flavor = updates.pop("flavor", None)
    if flavor:
        expected_attrs.append("flavor")
        # old/new flavors are optional: unset or falsy yields None.
        old = (
            (self.obj_attr_is_set("old_flavor") and self.old_flavor)
            and self.old_flavor.obj_to_primitive() or None
        )
        new = (
            (self.obj_attr_is_set("new_flavor") and self.new_flavor)
            and self.new_flavor.obj_to_primitive() or None
        )
        flavor_info = {"cur": self.flavor.obj_to_primitive(),
                       "old": old,
                       "new": new}
        updates["extra"]["flavor"] = jsonutils.dumps(flavor_info)
    db_inst = db.instance_create(context, updates)
    self._from_db_object(context, self, db_inst, expected_attrs)
def _do_build(id, proj, user, *args):
    """Create an instance record and spawn it on the connection.

    NOTE(review): nested helper — relies on 'self' (context, conn)
    from the enclosing scope. Parameter 'id' shadows the builtin.
    """
    values = {
        'id': id,
        'project_id': proj,
        'user_id': user,
        'image_ref': 1,
        'kernel_id': 2,
        'ramdisk_id': 3,
        'instance_type_id': '3',  # m1.large
        'os_type': 'linux',
        'architecture': 'x86-64'}
    # Static single-network info in the legacy (network, mapping)
    # tuple format.
    network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
                     {'broadcast': '192.168.0.255',
                      'dns': ['192.168.0.1'],
                      'gateway': '192.168.0.1',
                      'gateway6': 'dead:beef::1',
                      'ip6s': [{'enabled': '1',
                                'ip': 'dead:beef::dcad:beff:feef:0',
                                'netmask': '64'}],
                      'ips': [{'enabled': '1',
                               'ip': '192.168.0.100',
                               'netmask': '255.255.255.0'}],
                      'label': 'fake',
                      'mac': 'DE:AD:BE:EF:00:00',
                      'rxtx_cap': 3})]
    instance = db.instance_create(self.context, values)
    self.conn.spawn(instance, network_info)
def test_get_disk_mapping_complex(self):
    # The strangest possible disk mapping setup
    user_context = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(user_context, self.test_instance)

    ephemerals = [{'num': 0, 'virtual_name': 'ephemeral0',
                   'device_name': '/dev/vdb', 'size': 10},
                  {'num': 1, 'virtual_name': 'ephemeral1',
                   'device_name': '/dev/vdc', 'size': 10}]
    block_device_info = {
        'root_device_name': '/dev/vdf',
        'swap': {'device_name': '/dev/vdy', 'swap_size': 10},
        'ephemerals': ephemerals,
        'block_device_mapping': [{'connection_info': "fake",
                                  'mount_device': "/dev/vda",
                                  'delete_on_termination': True}],
    }

    mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                         "virtio", "ide",
                                         block_device_info)

    def vdisk(dev):
        # All expected entries are plain virtio disks.
        return {'bus': 'virtio', 'dev': dev, 'type': 'disk'}

    expect = {
        'disk': vdisk('vdf'),
        '/dev/vda': vdisk('vda'),
        'disk.eph0': vdisk('vdb'),
        'disk.eph1': vdisk('vdc'),
        'disk.swap': vdisk('vdy'),
        'root': vdisk('vdf'),
    }
    self.assertEqual(mapping, expect)
def test_get_disk_mapping_ephemeral(self):
    # A disk mapping with ephemeral devices
    user_context = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(user_context, self.test_instance)
    instance_ref['instance_type']['swap'] = 5

    ephemerals = [{'num': num,
                   'virtual_name': 'ephemeral%d' % num,
                   'device_name': '/dev/vd%s' % dev,
                   'size': 10}
                  for num, dev in enumerate('bcd')]
    block_device_info = {'ephemerals': ephemerals}

    mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                         "virtio", "ide",
                                         block_device_info)

    def vdisk(dev):
        return {'bus': 'virtio', 'dev': dev, 'type': 'disk'}

    expect = {'disk': vdisk('vda'),
              'disk.eph0': vdisk('vdb'),
              'disk.eph1': vdisk('vdc'),
              'disk.eph2': vdisk('vdd'),
              'disk.swap': vdisk('vde'),
              'root': vdisk('vda')}
    self.assertEqual(mapping, expect)
def test_spawn_with_network_info(self):
    """Spawning with explicit network info should not trigger any
    unexpected libvirt calls."""
    # Skip if non-libvirt environment
    if not self.lazy_load_library_exists():
        return

    # Preparing mocks
    def fake_none(self, instance):
        return

    self.create_fake_libvirt_mock()
    instance = db.instance_create(self.context, self.test_instance)

    # Start test
    self.mox.ReplayAll()
    conn = libvirt_conn.LibvirtConnection(False)
    # Firewall hooks become no-ops so spawn only exercises libvirt.
    conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
    conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
    network = db.project_get_network(context.get_admin_context(),
                                     self.project.id)
    ip_dict = {'ip': self.test_ip,
               'netmask': network['netmask'],
               'enabled': '1'}
    mapping = {'label': network['label'],
               'gateway': network['gateway'],
               'mac': instance['mac_address'],
               'dns': [network['dns']],
               'ips': [ip_dict]}
    network_info = [(network, mapping)]
    try:
        conn.spawn(instance, network_info)
    except Exception, e:
        # NOTE(review): 'count' is computed but never asserted, so an
        # 'Unexpected method call' failure is effectively swallowed —
        # looks like a missing assertion; confirm intent before fixing.
        count = (0 <= str(e.message).find('Unexpected method call'))
def test_finish_revert_migration(self):
    """finish_revert_migration should move the saved revert dir back
    into place, recreate the VM and power it on."""
    self._instance_data = self._get_instance_data()
    instance = db.instance_create(self._context, self._instance_data)
    network_info = fake_network.fake_get_instance_nw_info(
        self.stubs, spectacular=True)

    fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert'
                        % instance['name'])

    m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
                                                          None)
    m.AndReturn(False)

    # The saved revert directory is expected to be renamed back.
    m = pathutils.PathUtils.get_instance_migr_revert_dir(
        instance['name'])
    m.AndReturn(fake_revert_path)
    fake.PathUtils.rename(fake_revert_path, mox.IsA(str))

    self._set_vm_name(instance['name'])
    self._setup_create_instance_mocks(None, False)

    # Finally the VM is expected to be powered back on.
    vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
                                 constants.HYPERV_VM_STATE_ENABLED)

    self._mox.ReplayAll()
    self._conn.finish_revert_migration(instance, network_info, None)
    self._mox.VerifyAll()
def setUp(self):
    """Set up fake compute/volume managers and one bare instance
    record whose id volume tests attach against."""
    super(VolumeTestCase, self).setUp()
    self.compute = utils.import_object(FLAGS.compute_manager)
    # Switch to the fake connection type before loading the volume
    # manager so it picks up the flag.
    self.flags(connection_type='fake')
    self.volume = utils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    self.instance_id = db.instance_create(self.context, {})['id']
def test_get_disk_mapping_ephemeral(self):
    # A disk mapping with ephemeral devices
    user_context = context.RequestContext(self.user_id, self.project_id)
    self.test_instance['system_metadata']['instance_type_swap'] = 5
    instance_ref = db.instance_create(user_context, self.test_instance)

    ephemerals = [
        {'device_type': 'disk', 'guest_format': 'ext3',
         'device_name': '/dev/vdb', 'size': 10},
        {'disk_bus': 'ide', 'guest_format': None,
         'device_name': '/dev/vdc', 'size': 10},
        {'device_type': 'floppy',
         'device_name': '/dev/vdd', 'size': 10},
    ]
    mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                         "virtio", "ide",
                                         {'ephemerals': ephemerals})

    root_disk = {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
                 'boot_index': '1'}
    expect = {
        'disk': dict(root_disk),
        # guest_format surfaces as 'format' in the mapping.
        'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk',
                      'format': 'ext3'},
        # disk_bus overrides the default virtio bus.
        'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
        # device_type overrides the default 'disk' type.
        'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'},
        'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
        'root': dict(root_disk),
    }
    self.assertEqual(mapping, expect)
def test_attach_volume_raise_exception(self):
    """This shows how to test when exceptions are raised."""
    # Use a session whose calls fail, so attach_volume must raise.
    stubs.stubout_session(self.stubs,
                          stubs.FakeSessionForVolumeFailedTests)
    connection = xenapi_conn.get_connection(False)
    instance = db.instance_create(self.context, self.instance_values)
    volume = self._create_volume()
    xenapi_fake.create_vm(instance.name, 'Running')
    self.assertRaises(Exception,
                      connection.attach_volume,
                      instance.name,
                      volume['id'],
                      '/dev/sdc')
def test_migrate_disk_and_power_off_passes_exceptions(self):
    """Errors raised while migrating VHDs must propagate out of
    migrate_disk_and_power_off as MigrationError."""
    instance = db.instance_create(self.context, self.instance_values)
    instance_type = db.instance_type_get_by_name(self.context,
                                                 'm1.large')
    stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)

    def fake_raise(*args, **kwargs):
        # Simulates a failure during the VHD migration step.
        raise exception.MigrationError(reason='test failure')
    self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)

    conn = xenapi_conn.get_connection(False)
    self.assertRaises(exception.MigrationError,
                      conn.migrate_disk_and_power_off,
                      self.context, instance,
                      '127.0.0.1', instance_type)
def _create_instance(self, params=None):
    """Create a test instance and return its DB id.

    :param params: optional dict of instance values that override the
        defaults below.
    :returns: the id of the created instance record.
    """
    # Fix: avoid a mutable default argument ({}); None stands in for
    # "no overrides" and behaves identically for all callers.
    if params is None:
        params = {}
    inst = {}
    inst['image_ref'] = 1
    inst['reservation_id'] = 'r-fakeres'
    inst['launch_time'] = '10'
    inst['user_id'] = self.user_id
    inst['project_id'] = self.project_id
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    inst['instance_type_id'] = type_id
    inst['ami_launch_index'] = 0
    inst.update(params)
    return db.instance_create(self.context, inst)['id']
def _test_set_flavor(self, namespace):
    """set_flavor should persist the flavor under the given namespace
    so it can be extracted back from the DB record."""
    prefix = '%s_' % namespace if namespace is not None else ''
    db_inst = db.instance_create(
        self.context,
        {'user_id': self.context.user_id,
         'project_id': self.context.project_id})
    inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])

    default_flavor = flavors.get_default_flavor()
    inst.set_flavor(default_flavor, namespace)

    # Re-read the record and check that the flavor round-trips.
    db_inst = db.instance_get(self.context, db_inst['id'])
    extracted = flavors.extract_flavor(db_inst, prefix)
    self.assertEqual(default_flavor['flavorid'], extracted['flavorid'])
def _create_instance_in_the_db(self):
    """Create a test instance record directly in the DB and remember
    it on self.instance.

    Bug fix: db.instance_create takes a request context as its first
    argument; the original call passed only the values dict. None is
    passed here, matching the sibling helper in this file.
    """
    values = {
        'name': 1,
        'id': 1,
        'project_id': self.project.id,
        'user_id': self.user.id,
        'image_id': "1",
        'kernel_id': "1",
        'ramdisk_id': "1",
        'instance_type': 'm1.large',
        'mac_address': 'aa:bb:cc:dd:ee:ff',
    }
    self.instance = db.instance_create(None, values)
def _create_instance(self):
    """Create a test instance"""
    values = {'image_id': 1,
              'reservation_id': 'r-fakeres',
              'launch_time': '10',
              'user_id': self.user_id,
              'project_id': self.project_id,
              'instance_type_id': 1,
              'ami_launch_index': 0}
    return db.instance_create(self.context, values)['id']
def _create_instance_in_the_db(self):
    """Create a bare test instance record and remember it on self."""
    values = {'name': 1,
              'id': 1,
              'project_id': self.project_id,
              'user_id': self.user_id,
              'image_ref': "1",
              'kernel_id': "1",
              'ramdisk_id': "1",
              'mac_address': "de:ad:be:ef:be:ef",
              'instance_type': 'm1.large'}
    self.instance = db.instance_create(None, values)
def test_instance_get_project_vpn_joins(self):
    """instance_get_project_vpn should join fixed and floating IPs."""
    instance = db.instance_create(
        self.context,
        {'instance_type_id': FLAGS.default_instance_type,
         'image_ref': FLAGS.vpn_image_id,
         'project_id': self.project_id})
    _setup_networking(instance['id'])

    result = db.instance_get_project_vpn(self.context.elevated(),
                                         self.project_id)
    self.assertEqual(instance['id'], result['id'])
    # The joined floating IP must come back with the record.
    floating = result['fixed_ips'][0]['floating_ips'][0]
    self.assertEqual(floating.address, '1.2.1.2')
def test_describe_instances(self):
    """Makes sure describe_instances works and filters results."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_id': 1,
                                              'host': 'host1'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_id': 1,
                                              'host': 'host2'})
    comp1 = db.service_create(self.context,
                              {'host': 'host1',
                               'availability_zone': 'zone1',
                               'topic': "compute"})
    comp2 = db.service_create(self.context,
                              {'host': 'host2',
                               'availability_zone': 'zone2',
                               'topic': "compute"})

    # Unfiltered: one reservation containing both instances.
    reservations = self.cloud.describe_instances(self.context)
    instances = reservations['reservationSet'][0]['instancesSet']
    self.assertEqual(len(instances), 2)

    # Filtered by instance id: only the requested instance remains.
    instance_id = ec2utils.id_to_ec2_id(inst2['id'])
    reservations = self.cloud.describe_instances(
        self.context, instance_id=[instance_id])
    instances = reservations['reservationSet'][0]['instancesSet']
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['instanceId'], instance_id)
    self.assertEqual(instances[0]['placement']['availabilityZone'],
                     'zone2')

    for inst in (inst1, inst2):
        db.instance_destroy(self.context, inst['id'])
    for comp in (comp1, comp2):
        db.service_destroy(self.context, comp['id'])
def setUp(self):
    """Configure a fake compute driver, a temporary volumes directory
    and the in-memory test notifier, then create one bare instance
    for volume tests to attach against."""
    super(VolumeTestCase, self).setUp()
    self.compute = importutils.import_object(FLAGS.compute_manager)
    vol_tmpdir = tempfile.mkdtemp()
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               volumes_dir=vol_tmpdir)
    # Route notifications to the in-memory test notifier.
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   ['nova.openstack.common.notifier.test_notifier'])
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    instance = db.instance_create(self.context, {})
    self.instance_id = instance['id']
    self.instance_uuid = instance['uuid']
    # Reset captured notifications between tests.
    test_notifier.NOTIFICATIONS = []
def test_get_disk_mapping_simple_configdrive(self):
    """A simple disk mapping, with a config drive forced on; the
    config drive's bus/device depends on the host architecture."""
    # A simple disk mapping setup, but with configdrive added
    # It's necessary to check if the architecture is power, because
    # power doesn't have support to ide, and so libvirt translate
    # all ide calls to scsi
    self.flags(force_config_drive=True)
    user_context = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(user_context, self.test_instance)
    mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                         "virtio", "ide")
    # The last device is selected for this. on x86 is the last ide
    # device (hdd). Since power only support scsi, the last device
    # is sdz
    bus_ppc = ("scsi", "sdz")
    expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
    # Non-power architectures fall back to the ide default.
    bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
                              ("ide", "hdd"))
    expect = {
        'disk': {
            'bus': 'virtio', 'dev': 'vda',
            'type': 'disk', 'boot_index': '1'
        },
        'disk.local': {
            'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'
        },
        'disk.config': {
            'bus': bus, 'dev': dev, 'type': 'cdrom'
        },
        'root': {
            'bus': 'virtio', 'dev': 'vda',
            'type': 'disk', 'boot_index': '1'
        }
    }
    self.assertEqual(expect, mapping)
def test_preparing_xml_info(self):
    """_prepare_xml_info should expose one nic entry per network in
    the supplied network info."""
    conn = libvirt_conn.LibvirtConnection(True)
    instance_ref = db.instance_create(self.context, self.test_instance)

    # Without network info there are no nics in the parameters.
    params = conn._prepare_xml_info(instance_ref, False)
    self.assertFalse(params['nics'])

    params = conn._prepare_xml_info(instance_ref, False,
                                    _create_network_info())
    self.assertEqual(len(params['nics']), 1)

    params = conn._prepare_xml_info(instance_ref, False,
                                    _create_network_info(2))
    self.assertEqual(len(params['nics']), 2)
def test_run_attach_detach_volume(self):
    """Make sure volume can be attached and detached from instance."""
    # Fix: the original built an 'inst' dict of instance values and a
    # never-used 'instance_id' local; the instance was actually created
    # with {}. The dead locals are removed — behavior is unchanged.
    instance = db.instance_create(self.context, {})
    instance_uuid = instance['uuid']
    mountpoint = "/dev/sdf"
    volume = self._create_volume()
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume_id)
    if FLAGS.fake_tests:
        db.volume_attached(self.context, volume_id, instance_uuid,
                           mountpoint)
    else:
        self.compute.attach_volume(self.context, instance_uuid,
                                   volume_id, mountpoint)

    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(vol['status'], "in-use")
    self.assertEqual(vol['attach_status'], "attached")
    self.assertEqual(vol['mountpoint'], mountpoint)
    self.assertEqual(vol['instance_uuid'], instance_uuid)

    # An attached volume must not be deletable.
    self.assertRaises(exception.NovaException,
                      self.volume.delete_volume,
                      self.context, volume_id)

    if FLAGS.fake_tests:
        db.volume_detached(self.context, volume_id)
    else:
        self.compute.detach_volume(self.context, instance_uuid,
                                   volume_id)
    vol = db.volume_get(self.context, volume_id)
    self.assertEqual(vol['status'], "available")

    self.volume.delete_volume(self.context, volume_id)
    self.assertRaises(exception.VolumeNotFound,
                      db.volume_get,
                      self.context, volume_id)
    db.instance_destroy(self.context, instance_uuid)
def test_instance_get_all_hung_in_rebooting(self):
    """Only rebooting instances stale for more than N seconds appear.

    Checks the empty case, a stale rebooting instance (returned), and
    a freshly updated rebooting instance (not returned).
    """
    ctxt = context.get_admin_context()

    # Ensure no instances are returned.
    results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
    self.assertEqual(0, len(results))

    # Ensure one rebooting instance with updated_at older than 10 seconds
    # is returned.
    # NOTE(review): the literals were written 01/00 — Python 2
    # octal-style integers, a SyntaxError on Python 3; rewritten as
    # plain decimals with the same values.
    updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
    values = {"task_state": "rebooting", "updated_at": updated_at}
    instance = db.instance_create(ctxt, values)
    results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
    self.assertEqual(1, len(results))
    db.instance_update(ctxt, instance.id, {"task_state": None})

    # Ensure the newly rebooted instance is not returned.
    updated_at = datetime.datetime.utcnow()
    values = {"task_state": "rebooting", "updated_at": updated_at}
    instance = db.instance_create(ctxt, values)
    results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
    self.assertEqual(0, len(results))
    db.instance_update(ctxt, instance.id, {"task_state": None})
def test_get_disk_mapping_lxc(self):
    """LXC disk mapping: lxc bus throughout and no device names."""
    ctxt = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(ctxt, self.test_instance)

    mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
                                         "lxc", "lxc", None)

    lxc_disk = {'bus': 'lxc', 'dev': None, 'type': 'disk'}
    self.assertEqual(mapping, {'disk': dict(lxc_disk),
                               'root': dict(lxc_disk)})
def setUp(self):
    """Prepare the volume tests: fake compute driver, a throwaway
    volumes directory, a stubbed tgt admin, an admin context and one
    pre-created instance."""
    super(VolumeTestCase, self).setUp()
    self.compute = importutils.import_object(FLAGS.compute_manager)
    # Volumes get written under a temporary directory.
    tmp_vol_dir = tempfile.mkdtemp()
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               volumes_dir=tmp_vol_dir,
               notification_driver=[test_notifier.__name__])
    # Avoid talking to a real tgtd.
    self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    created = db.instance_create(self.context, {})
    self.instance_id = created['id']
    self.instance_uuid = created['uuid']
    # Start each test with an empty notification log.
    test_notifier.NOTIFICATIONS = []
def test_fetch_diskio_with_libvirt_non_existent_instance(self):
    # Verify the disk I/O pollster tolerates an instance that exists
    # in the DB but that the hypervisor knows nothing about.
    flags.FLAGS.connection_type = 'libvirt'
    # Instance record only -- never spawned on the host.
    instance = db.instance_create(self.context, {})
    self.mox.StubOutWithMock(self.manager.db, 'instance_get_all_by_host')
    self.manager.db.instance_get_all_by_host(
        self.context,
        self.manager.host,
    ).AndReturn([instance])
    self.mox.ReplayAll()
    # Drain the generator; must not raise despite the instance being
    # unknown to libvirt.
    list(self.pollster.get_counters(self.manager, self.context))
def test_live_migration_raises_exception(self): """Confirms recover method is called when exceptions are raised.""" # Skip if non-libvirt environment if not self.lazy_load_library_exists(): return # Preparing data self.compute = utils.import_object(FLAGS.compute_manager) instance_dict = { 'host': 'fake', 'state': power_state.RUNNING, 'state_description': 'running' } instance_ref = db.instance_create(self.context, self.test_instance) instance_ref = db.instance_update(self.context, instance_ref['id'], instance_dict) vol_dict = {'status': 'migrating', 'size': 1} volume_ref = db.volume_create(self.context, vol_dict) db.volume_attached(self.context, volume_ref['id'], instance_ref['id'], '/dev/fake') # Preparing mocks vdmock = self.mox.CreateMock(libvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest', mox.IgnoreArg(), None, FLAGS.live_migration_bandwidth).\ AndRaise(libvirt.libvirtError('ERR')) def fake_lookup(instance_name): if instance_name == instance_ref.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) # Start test self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, self.context, instance_ref, 'dest', '', self.compute.recover_live_migration) instance_ref = db.instance_get(self.context, instance_ref['id']) self.assertTrue(instance_ref['state_description'] == 'running') self.assertTrue(instance_ref['state'] == power_state.RUNNING) volume_ref = db.volume_get(self.context, volume_ref['id']) self.assertTrue(volume_ref['status'] == 'in-use') db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id'])
def test_get_disk_mapping_blockdev_many(self):
    """Disk mapping where block devices replace every default disk."""
    ctxt = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(ctxt, self.test_instance)

    # One mapped block device for each of vda, vdb and vdc.
    block_device_info = {
        'block_device_mapping': [
            {'connection_info': "fake",
             'mount_device': "/dev/vd%s" % letter,
             'delete_on_termination': True}
            for letter in ('a', 'b', 'c')
        ]
    }
    mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                         "virtio", "ide",
                                         block_device_info)

    expect = {}
    for dev in ('vda', 'vdb', 'vdc'):
        expect['/dev/%s' % dev] = {'bus': 'virtio',
                                   'dev': dev,
                                   'type': 'disk'}
    expect['root'] = {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
    self.assertEqual(mapping, expect)
def test_multi_nic(self):
    """XML for an instance with two NICs contains two interfaces,
    with the first carrying the expected filter parameters."""
    network_info = _create_network_info(2)
    conn = libvirt_conn.LibvirtConnection(True)
    instance_ref = db.instance_create(self.context,
                                      dict(self.test_instance))

    tree = xml_to_tree(conn.to_xml(instance_ref, False, network_info))

    interfaces = tree.findall("./devices/interface")
    self.assertEquals(len(interfaces), 2)

    first = interfaces[0]
    self.assertEquals(first.get('type'), 'bridge')
    params = first.findall('./filterref/parameter')
    self.assertEquals(params[0].get('name'), 'IP')
    self.assertEquals(params[0].get('value'), '0.0.0.0/0')
    self.assertEquals(params[1].get('name'), 'DHCPSERVER')
    self.assertEquals(params[1].get('value'), 'fake')
def test_get_disk_mapping_simple(self):
    """Default disk mapping: root on vda, ephemeral disk on vdb."""
    ctxt = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(ctxt, self.test_instance)

    mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                         "virtio", "ide")

    root = {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
    local = {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}
    self.assertEqual(mapping, {'disk': dict(root),
                               'disk.local': local,
                               'root': dict(root)})
def _create_fake_instance(self, params=None):
    """Create a test instance record, merging in caller overrides.

    :param params: optional dict of values overriding the defaults
    :returns: the created DB instance
    """
    values = {
        'vm_state': vm_states.ACTIVE,
        'image_ref': 1,
        'reservation_id': 'r-fakeres',
        'user_id': 'fake',
        'project_id': 'fake',
        'instance_type_id': 2,
        'ami_launch_index': 0,
    }
    if params:
        values.update(params)
    return db.instance_create(self.context, values)
def _wrapped_create(self, params=None):
    """Create an m1.tiny test instance, merging in caller overrides.

    :param params: optional dict of values overriding the defaults
    :returns: the created DB instance
    """
    tiny = instance_types.get_instance_type_by_name('m1.tiny')
    values = {
        'image_ref': 1,
        'user_id': self.user_id,
        'project_id': self.project_id,
        'instance_type_id': tiny['id'],
        'root_gb': 0,
        'ephemeral_gb': 0,
        'access_ip_v4': '1.2.3.4',
        'access_ip_v6': 'feed:5eed',
        'display_name': 'test_instance',
    }
    if params:
        values.update(params)
    return db.instance_create(self.context, values)
def create_instance(testcase):
    """Create and return a test instance backed by flavor id 1.

    Also stubs out the image service for the given test case.
    """
    fake.stub_out_image_service(testcase.stubs)
    ctxt = context.get_admin_context()
    flavor = db.flavor_get(ctxt, 1)
    sys_meta = flavors.save_flavor_info({}, flavor)
    values = {
        'user_id': 'fake',
        'project_id': 'fake',
        'instance_type_id': 1,
        'memory_mb': 1024,
        'vcpus': 2,
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'system_metadata': sys_meta,
    }
    return db.instance_create(ctxt, values)
def test_create_with_special_things(self):
    # Verify Instance.create() flattens security groups and the info
    # cache into the primitive values handed to db.instance_create.
    self.mox.StubOutWithMock(db, 'instance_create')
    fake_inst = fake_instance.fake_db_instance()
    # This expectation must match exactly what Instance.create()
    # passes to the DB layer: group names as a plain list and the
    # network_info serialized to a JSON string.
    db.instance_create(
        self.context, {
            'host': 'foo-host',
            'security_groups': ['foo', 'bar'],
            'info_cache': {
                'network_info': '[]'
            },
        }).AndReturn(fake_inst)
    self.mox.ReplayAll()
    # Build the object-layer inputs that should produce the above.
    secgroups = security_group.SecurityGroupList()
    secgroups.objects = []
    for name in ('foo', 'bar'):
        secgroup = security_group.SecurityGroup()
        secgroup.name = name
        secgroups.objects.append(secgroup)
    info_cache = instance_info_cache.InstanceInfoCache()
    info_cache.network_info = network_model.NetworkInfo()
    inst = instance.Instance(host='foo-host',
                             security_groups=secgroups,
                             info_cache=info_cache)
    inst.create(self.context)
def _create_instance(self, instance_id=1, spawn=True):
    """Creates and spawns a test instance.

    :param instance_id: id to assign to the instance record
    :param spawn: when True, also spawn the instance on the fake conn
    :returns: the created DB instance
    """
    stubs.stubout_loopingcall_start(self.stubs)
    instance = db.instance_create(self.context, {
        'id': instance_id,
        'project_id': self.project_id,
        'user_id': self.user_id,
        'image_ref': 1,
        'kernel_id': 2,
        'ramdisk_id': 3,
        'local_gb': 20,
        'instance_type_id': '3',  # m1.large
        'os_type': 'linux',
        'architecture': 'x86-64'})

    # Single fake NIC on bridge fa0 with one IPv4 and one IPv6 address.
    vif = {'bridge': 'fa0', 'id': 0, 'injected': False}
    net = {'broadcast': '192.168.0.255',
           'dns': ['192.168.0.1'],
           'gateway': '192.168.0.1',
           'gateway6': 'dead:beef::1',
           'ip6s': [{'enabled': '1',
                     'ip': 'dead:beef::dcad:beff:feef:0',
                     'netmask': '64'}],
           'ips': [{'enabled': '1',
                    'ip': '192.168.0.100',
                    'netmask': '255.255.255.0'}],
           'label': 'fake',
           'mac': 'DE:AD:BE:EF:00:00',
           'rxtx_cap': 3}
    network_info = [(vif, net)]

    if spawn:
        self.conn.spawn(self.context, instance, network_info)
    return instance
def test_finish_migrate_no_local_storage(self):
    """finish_migration must not resize the VDI when the flavor has
    no local storage (local_gb == 0)."""
    tiny_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.values.update({'instance_type_id': tiny_id, 'local_gb': 0})
    instance = db.instance_create(self.context, self.values)

    # Any resize attempt is a test failure.
    def fake_vdi_resize(*args, **kwargs):
        raise Exception("This shouldn't be called")

    self.stubs.Set(stubs.FakeSessionForMigrationTests,
                   "VDI_resize_online", fake_vdi_resize)
    stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
    stubs.stubout_loopingcall_start(self.stubs)
    conn = xenapi_conn.get_connection(False)

    # Single fake NIC on bridge fa0 with one IPv4 and one IPv6 address.
    vif = {'bridge': 'fa0', 'id': 0, 'injected': False}
    net = {'broadcast': '192.168.0.255',
           'dns': ['192.168.0.1'],
           'gateway': '192.168.0.1',
           'gateway6': 'dead:beef::1',
           'ip6s': [{'enabled': '1',
                     'ip': 'dead:beef::dcad:beff:feef:0',
                     'netmask': '64'}],
           'ips': [{'enabled': '1',
                    'ip': '192.168.0.100',
                    'netmask': '255.255.255.0'}],
           'label': 'fake',
           'mac': 'DE:AD:BE:EF:00:00',
           'rxtx_cap': 3}
    network_info = [(vif, net)]

    conn.finish_migration(self.context, instance,
                          dict(base_copy='hurr', cow='durr'),
                          network_info, resize_instance=True)
def _create_instance(self, params=None):
    """Create a test instance and return its id.

    :param params: optional dict of values overriding the defaults
    :returns: the id of the created instance
    """
    # NOTE(review): the default used to be a mutable dict literal
    # (``params={}``) -- the classic shared-mutable-default pitfall;
    # switched to the None-sentinel idiom. Call-compatible.
    if params is None:
        params = {}
    flavor = flavors.get_flavor_by_name('m1.tiny')
    sys_meta = flavors.save_flavor_info({}, flavor)
    inst = {
        'image_ref': 1,
        'reservation_id': 'r-fakeres',
        'user_id': self.user_id,
        'project_id': self.project_id,
        'instance_type_id': flavor['id'],
        'system_metadata': sys_meta,
        'ami_launch_index': 0,
        'root_gb': 0,
        'ephemeral_gb': 0,
    }
    inst.update(params)
    return db.instance_create(self.context, inst)['id']
def _test_get_flavor(self, namespace):
    """Round-trip the default flavor through system_metadata and
    verify Instance.get_flavor() recovers it for the namespace."""
    prefix = '' if namespace is None else '%s_' % namespace
    sys_meta = flavors.save_flavor_info({},
                                        flavors.get_default_flavor(),
                                        prefix)
    db_inst = db.instance_create(self.context, {
        'user_id': self.context.user_id,
        'project_id': self.context.project_id,
        'system_metadata': sys_meta})

    db_flavor = flavors.extract_flavor(db_inst, prefix)
    inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
    flavor = inst.get_flavor(namespace)
    self.assertEqual(db_flavor['flavorid'], flavor.flavorid)
def test_resize_xenserver_6(self):
    """resize_instance on XenServer 6 must invoke VDI_resize."""
    instance = db.instance_create(self.context, self.values)
    called = {'resize': False}

    # Record that the session-level resize call actually happened.
    def fake_vdi_resize(*args, **kwargs):
        called['resize'] = True

    self.stubs.Set(stubs.FakeSessionForMigrationTests,
                   "VDI_resize", fake_vdi_resize)
    stubs.stubout_session(self.stubs,
                          stubs.FakeSessionForMigrationTests,
                          product_version=(6, 0, 0))
    stubs.stubout_loopingcall_start(self.stubs)
    conn = xenapi_conn.get_connection(False)
    conn._vmops.resize_instance(instance, '')
    self.assertEqual(True, called['resize'])