def test_infrastructure_cache(realtime_instance):
    """The infrastructure cache should hand back exactly the mor props and tags it was fed."""
    cache = InfrastructureCache(float('inf'))
    config = VSphereConfig(realtime_instance, {}, logger)
    mock_api = build_rest_api_client(config, logger)

    # Two mocked mors per resource type, each mapped to a unique sentinel object.
    mors = {MagicMock(spec=resource, _moId="foo"): object() for resource in ALL_RESOURCES_WITH_METRICS * 2}

    with cache.update():
        for mor, props in iteritems(mors):
            cache.set_mor_props(mor, props)
        cache.set_all_tags(mock_api.get_resource_tags_for_mors(mors))

    # Every resource type should report both of its mors.
    for resource in ALL_RESOURCES_WITH_METRICS:
        assert len(list(cache.get_mors(resource))) == 2

    # Each mor's properties must come back unchanged.
    for mor, props in iteritems(mors):
        assert cache.get_mor_props(mor) == props

    vm_mor = vim.VirtualMachine(moId='VM4-4-1')
    vm2_mor = vim.VirtualMachine(moId='i-dont-have-tags')
    datastore = vim.Datastore(moId='NFS-Share-1')
    assert cache.get_mor_tags(vm_mor) == ['my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2']
    assert cache.get_mor_tags(datastore) == ['my_cat_name_2:my_tag_name_2']
    # A mor with no tags yields an empty list, not an error.
    assert cache.get_mor_tags(vm2_mor) == []
def get_datastore_mo(client, soap_stub, datacenter_name, datastore_name):
    """Return the datastore managed object for the given datacenter/datastore names.

    Returns None when no matching datastore can be found.
    """
    datastore = get_datastore(client, datacenter_name, datastore_name)
    if not datastore:
        return None
    return vim.Datastore(datastore, soap_stub)
def test_tags_cache(realtime_instance):
    """Tags stored via set_all_tags should be retrievable per managed object."""
    cache = TagsCache(float('inf'))
    config = VSphereConfig(realtime_instance, logger)
    mock_api = VSphereRestAPI(config, log=logger)

    with cache.update():
        cache.set_all_tags(mock_api.get_resource_tags())

    vm_mor = vim.VirtualMachine(moId='VM4-4-1')
    vm2_mor = vim.VirtualMachine(moId='i-dont-have-tags')
    datastore = vim.Datastore(moId='NFS-Share-1')

    assert cache.get_mor_tags(vm_mor) == ['my_cat_name_1:my_tag_name_1', 'my_cat_name_2:my_tag_name_2']
    assert cache.get_mor_tags(datastore) == ['my_cat_name_2:my_tag_name_2']
    # Unknown mors produce an empty tag list rather than raising.
    assert cache.get_mor_tags(vm2_mor) == []
def run(self, ids=None, names=None, datastores=None, datastore_clusters=None,
        resource_pools=None, vapps=None, hosts=None, folders=None,
        clusters=None, datacenters=None, virtual_switches=None,
        no_recursion=False, vsphere=None):
    """Gather VM properties (moid, name, guest OS, power state) from every
    selector the caller supplied and return the de-duplicated collection.

    Each selector (ids, names, datastores, ...) contributes VMs independently;
    a VM found by more than one selector is only recorded once (first wins).
    """
    # TODO: food for thought. PowerCli contains additional
    # parameters that are not present here for the folliwing reason:
    # <server> - we may need to bring it in if we decide to have
    # connections to more than 1 VC.
    # <tag> - Tags in VC are not the same as tags you see in Web
    # Client for the reason, that those tags are stored
    # in Inventory Service only. PowerCli somehow can access
    # it, from vSphere SDK there is no way.
    self.establish_connection(vsphere)
    props = ['config.guestFullName', 'name', 'runtime.powerState']
    moid_to_vm = {}

    # VMs addressed directly by managed-object id.
    vms_from_vmids = []
    if ids:
        vms_from_vmids = [vim.VirtualMachine(moid, stub=self.si._stub) for moid in ids]
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm, vms_from_vmids)

    # VMs matched by display name via a container view over the whole inventory.
    vms_from_names = []
    if names:
        container = self.si_content.viewManager.CreateContainerView(
            self.si_content.rootFolder, [vim.VirtualMachine], True)
        for vm in container.view:
            if vm.name in names:
                vms_from_names.append(vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm, vms_from_names)

    # VMs that live on the given datastores.
    vms_from_datastores = []
    if datastores:
        vim_datastores = [vim.Datastore(moid, stub=self.si._stub) for moid in datastores]
        for ds in vim_datastores:
            vms_from_datastores.extend(ds.vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm, vms_from_datastores)

    # VMs on any datastore belonging to the given datastore clusters.
    vms_from_datastore_clusters = []
    if datastore_clusters:
        vim_datastore_clusters = [vim.StoragePod(moid, stub=self.si._stub)
                                  for moid in datastore_clusters]
        for ds_cl in vim_datastore_clusters:
            for ds in ds_cl.childEntity:
                vms_from_datastore_clusters.extend(ds.vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm, vms_from_datastore_clusters)

    # VMs attached to any portgroup of the given distributed virtual switches.
    vms_from_virtual_switches = []
    if virtual_switches:
        vim_virtual_switches = [vim.DistributedVirtualSwitch(moid, stub=self.si._stub)
                                for moid in virtual_switches]
        for vswitch in vim_virtual_switches:
            for pg in vswitch.portgroup:
                vms_from_virtual_switches.extend(pg.vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm, vms_from_virtual_switches)

    # VMs found by traversing container objects (location-style parameters).
    vms_from_containers = []
    containers = []
    if resource_pools:
        containers += [vim.ResourcePool(moid, stub=self.si._stub) for moid in resource_pools]
    if vapps:
        containers += [vim.VirtualApp(moid, stub=self.si._stub) for moid in vapps]
    if hosts:
        containers += [vim.HostSystem(moid, stub=self.si._stub) for moid in hosts]
    if folders:
        containers += [vim.Folder(moid, stub=self.si._stub) for moid in folders]
    if clusters:
        containers += [vim.ComputeResource(moid, stub=self.si._stub) for moid in clusters]
    if datacenters:
        containers += [vim.Datacenter(moid, stub=self.si._stub) for moid in datacenters]

    for cont in containers:
        # Build a property-collector filter rooted at a container view so the
        # server returns only the three properties we need per VM.
        objView = self.si_content.viewManager.CreateContainerView(
            cont, [vim.VirtualMachine], not no_recursion)
        tSpec = vim.PropertyCollector.TraversalSpec(
            name='tSpecName', path='view', skip=False, type=vim.view.ContainerView)
        pSpec = vim.PropertyCollector.PropertySpec(
            all=False, pathSet=props, type=vim.VirtualMachine)
        oSpec = vim.PropertyCollector.ObjectSpec(
            obj=objView, selectSet=[tSpec], skip=False)
        pfSpec = vim.PropertyCollector.FilterSpec(
            objectSet=[oSpec], propSet=[pSpec], reportMissingObjectsInResults=False)
        retOptions = vim.PropertyCollector.RetrieveOptions()
        retProps = self.si_content.propertyCollector.RetrievePropertiesEx(
            specSet=[pfSpec], options=retOptions)
        vms_from_containers += retProps.objects
        # Drain any continuation pages the collector hands back.
        while retProps.token:
            retProps = self.si_content.propertyCollector.\
                ContinueRetrievePropertiesEx(token=retProps.token)
            vms_from_containers += retProps.objects
        objView.Destroy()

    # Container traversal yields ObjectContent results, not VM objects; merge
    # them without overwriting entries recorded by the direct selectors above.
    for vm in vms_from_containers:
        if vm.obj._GetMoId() not in moid_to_vm:
            moid_to_vm[vm.obj._GetMoId()] = {
                "moid": vm.obj._GetMoId(),
                # NOTE(review): assumes propSet comes back ordered as
                # [config.guestFullName, name, runtime.powerState] — TODO confirm.
                "name": vm.propSet[1].val,
                "os": vm.propSet[0].val,
                "runtime.powerState": vm.propSet[2].val
            }
    return moid_to_vm.values()
def run(self, vms, persistence='Persistent', disk_type='flat', capacity_gb=1,
        datastore=None, datastore_cluster=None, device_name=None, disk_path='',
        storage_format='Thin'):
    """Add a new virtual hard disk to each VM in *vms*.

    When a datastore cluster is given, Storage DRS recommendations decide disk
    placement; otherwise the disk is attached via a plain ReconfigVM task.
    Returns a list of {"vm_moid", "success"} dicts, one per processed VM.
    """
    # TODO: 'controller' parameter is missing here. The reason is because we do not support
    # passing real objects like PowerCli and there is no uuid to find and address the
    # controller in the system.
    persistence = persistence.lower()
    disk_type = disk_type.lower()
    storage_format = storage_format.lower()
    si = self.si
    si_content = si.RetrieveContent()
    vm_objs = [vim.VirtualMachine(moid, stub=si._stub) for moid in vms]
    # by checking the name property, the vms' existance is checked.
    [vm_obj.name for vm_obj in vm_objs]
    datastore_obj = None
    if datastore:
        datastore_obj = vim.Datastore(datastore, stub=si._stub)
        # by checking the name property, the vms' existance is checked.
        datastore_obj.name
    result = []
    if datastore_cluster:
        ds_clust_obj = vim.StoragePod(datastore_cluster, stub=si._stub)
        # by retrieving the name property, the existance is checked.
        ds_clust_obj.name
        srm = si_content.storageResourceManager
        for vm in vm_objs:
            vm_reconfig_spec = NewHardDisk.get_vm_reconfig_spec(
                vm, datastore_obj, disk_type, storage_format, persistence,
                disk_path, device_name, capacity_gb)
            storage_placement_spec = NewHardDisk.get_storage_placement_spec(
                ds_clust_obj, vm, vm_reconfig_spec)
            datastores = srm.RecommendDatastores(
                storageSpec=storage_placement_spec)
            if not datastores.recommendations:
                # BUGFIX: 'vm' is a vim.VirtualMachine, which has no '.obj'
                # attribute (the success path below already uses vm._GetMoId()),
                # and the loop previously fell through to index an empty
                # recommendations list. Report and actually skip this VM.
                sys.stderr.write(
                    'Skipping %s as there is no datastore recommendation'
                    % vm._GetMoId())
                continue
            add_disk_task = srm.ApplyStorageDrsRecommendation_Task(
                datastores.recommendations[0].key)
            successfully_added_disk = self._wait_for_task(add_disk_task)
            result.append({
                "vm_moid": vm._GetMoId(),
                "success": successfully_added_disk
            })
    else:
        for vm in vm_objs:
            vm_reconfig_spec = NewHardDisk.get_vm_reconfig_spec(
                vm, datastore_obj, disk_type, storage_format, persistence,
                disk_path, device_name, capacity_gb)
            add_disk_task = vm.ReconfigVM_Task(spec=vm_reconfig_spec)
            successfully_added_disk = self._wait_for_task(add_disk_task)
            result.append({
                "vm_moid": vm._GetMoId(),
                "success": successfully_added_disk
            })
    return result
def test_vmware_get_inventory():
    """Inventory labels must cover plain compute, cluster, nested-folder and
    storage-pod layouts, reporting datacenter and cluster context per object."""
    content = mock.Mock(spec=vim.ServiceInstanceContent)

    # Compute case 1: bare compute resource holding one host.
    host_1 = mock.Mock(spec=vim.HostSystem)
    host_1._moId = 'host:1'
    host_1.name = 'host-1'
    host_1.summary.config.name = 'host-1.'
    folder_1 = mock.Mock(spec=vim.ComputeResource)
    folder_1.host = [host_1]

    # Computer case 2: host inside a real cluster object.
    host_2 = mock.Mock(spec=vim.HostSystem)
    host_2._moId = 'host:2'
    host_2.name = 'host-2'
    host_2.summary.config.name = 'host-2.'
    folder_2 = vim.ClusterComputeResource('computer-cluster:1')
    folder_2.__dict__['name'] = 'compute-cluster-1'
    folder_2.__dict__['host'] = [host_2]

    # Folders case: compute resource nested two folders deep.
    host_3 = mock.Mock(spec=vim.HostSystem)
    host_3._moId = 'host:3'
    host_3.name = 'host-3'
    host_3.summary.config.name = 'host-3.'
    folder_3 = mock.Mock(spec=vim.ComputeResource)
    folder_3.host = [host_3]
    folder_4 = vim.Folder('folder:4')
    folder_4.__dict__['name'] = 'folder-4'
    folder_4.__dict__['childEntity'] = [folder_3]
    folder_5 = vim.Folder('folder:5')
    folder_5.__dict__['name'] = 'folder-5'
    folder_5.__dict__['childEntity'] = [folder_4]

    # Datastore case 1: datastore directly under the datacenter.
    datastore_1 = vim.Datastore('datastore:1')
    datastore_1.__dict__['name'] = 'datastore-1'

    # Datastore case 2: datastore inside a storage pod (datastore cluster).
    datastore_2 = vim.Datastore('datastore:2')
    datastore_2.__dict__['name'] = 'datastore-2'
    datastore_2_folder = vim.StoragePod('storagepod:1')
    datastore_2_folder.__dict__['childEntity'] = [datastore_2]
    datastore_2_folder.__dict__['name'] = 'datastore2-folder'

    data_center_1 = mock.Mock(spec=vim.Datacenter)
    data_center_1.name = 'dc-1'
    data_center_1_hostfolder = mock.Mock(spec=vim.Folder)
    data_center_1_hostfolder.childEntity = [folder_1, folder_2, folder_5]
    data_center_1.hostFolder = data_center_1_hostfolder
    dc1_datastoreFolder = mock.Mock(spec=vim.Folder)
    dc1_datastoreFolder.childEntity = [datastore_1, datastore_2_folder]
    data_center_1.datastoreFolder = dc1_datastoreFolder
    rootFolder1 = mock.Mock(spec=vim.Folder)
    rootFolder1.childEntity = [data_center_1]
    content.rootFolder = rootFolder1

    collect_only = {
        'vms': True,
        'vmguests': True,
        'datastores': True,
        'hosts': True,
        'snapshots': True,
    }
    collector = VmwareCollector(
        '127.0.0.1',
        'root',
        'password',
        collect_only,
        ignore_ssl=True,
    )
    collector.content = content

    with contextlib.ExitStack() as stack:
        # We have to disable the LazyObject magic on pyvmomi classes so that we can use them as fakes
        for klass, attr in (
            (vim.Folder, 'name'),
            (vim.Folder, 'childEntity'),
            (vim.ClusterComputeResource, 'name'),
            (vim.ClusterComputeResource, 'host'),
            (vim.Datastore, 'name'),
            (vim.StoragePod, 'childEntity'),
            (vim.StoragePod, 'name'),
        ):
            stack.enter_context(mock.patch.object(klass, attr, None))

        host = yield collector.host_labels
        ds = yield collector.datastore_labels
        assert host == {
            'host:1': ['host-1', 'dc-1', ''],
            'host:2': ['host-2', 'dc-1', 'compute-cluster-1'],
            'host:3': ['host-3', 'dc-1', ''],
        }
        assert ds == {
            'datastore-1': ['datastore-1', 'dc-1', ''],
            'datastore-2': ['datastore-2', 'dc-1', 'datastore2-folder'],
        }
def test_vmware_get_inventory():
    """_vmware_get_inventory should map hosts and datastores to their
    datacenter and (optional) cluster metadata."""
    content = mock.Mock()

    # Compute case 1: host in a plain (cluster-less) compute resource.
    host_1 = mock.Mock()
    host_1._moId = 'host:1'
    host_1.name = 'host-1'
    folder_1 = mock.Mock()
    folder_1.host = [host_1]

    # Computer case 2: host that belongs to a real cluster object.
    host_2 = mock.Mock()
    host_2._moId = 'host:2'
    host_2.name = 'host-2'
    host_2.summary.config.name = 'host-2.'
    folder_2 = vim.ClusterComputeResource('computer-cluster:1')
    folder_2.__dict__['name'] = 'compute-cluster-1'
    folder_2.__dict__['host'] = [host_2]

    # Datastore case 1: datastore directly under the datacenter.
    datastore_1 = vim.Datastore('datastore:1')
    datastore_1.__dict__['name'] = 'datastore-1'

    # Datastore case 2: datastore nested in a datastore-cluster folder.
    datastore_2 = vim.Datastore('datastore:2')
    datastore_2.__dict__['name'] = 'datastore-2'
    datastore_2_folder = mock.Mock()
    datastore_2_folder.childEntity = [datastore_2]
    datastore_2_folder.name = 'datastore2-folder'

    data_center_1 = mock.Mock()
    data_center_1.name = 'dc-1'
    data_center_1.hostFolder.childEntity = [folder_1, folder_2]
    data_center_1.datastoreFolder.childEntity = [
        datastore_1, datastore_2_folder
    ]
    content.rootFolder.childEntity = [data_center_1]

    collect_only = {
        'vms': True,
        'vmguests': True,
        'datastores': True,
        'hosts': True,
        'snapshots': True,
    }
    collector = VmwareCollector(
        '127.0.0.1',
        'root',
        'password',
        collect_only,
        ignore_ssl=True,
    )

    with contextlib.ExitStack() as stack:
        # We have to disable the LazyObject magic on pyvmomi classes so that we can use them as fakes
        for klass, attr in (
            (vim.ClusterComputeResource, 'name'),
            (vim.ClusterComputeResource, 'host'),
            (vim.Datastore, 'name'),
        ):
            stack.enter_context(mock.patch.object(klass, attr, None))

        host, ds = collector._vmware_get_inventory(content)

        assert host == {
            'host:1': {
                'name': 'host-1',
                'dc': 'dc-1',
                'cluster': '',
            },
            'host:2': {
                'name': 'host-2',
                'dc': 'dc-1',
                'cluster': 'compute-cluster-1',
            }
        }
        assert ds == {
            'datastore-1': {
                'dc': 'dc-1',
                'ds_cluster': '',
            },
            'datastore-2': {
                'dc': 'dc-1',
                'ds_cluster': 'datastore2-folder',
            }
        }