def test_query_stats(self):
    self.vim_client._content.perfManager = MagicMock()
    counter_ids = [101, 102, 103]
    self.vim_client._content.perfManager.perfCounter = create_fake_counters(
        counter_ids)
    self.vim_client._content.perfManager.QueryPerf.return_value = [
        vim.PerformanceManager.EntityMetricCSV(
            entity=vim.HostSystem('ha-host'),
            sampleInfoCSV='20,1970-01-01T00:00:10Z',
            value=[
                vim.PerformanceManager.MetricSeriesCSV(
                    id=vim.PerformanceManager.MetricId(
                        counterId=counter_ids[0], instance=''),
                    value='200')
            ]),
        vim.PerformanceManager.EntityMetricCSV(
            entity=vim.HostSystem('ha-host'),
            sampleInfoCSV='20,1970-01-01T00:00:10Z',
            value=[
                vim.PerformanceManager.MetricSeriesCSV(
                    id=vim.PerformanceManager.MetricId(
                        counterId=counter_ids[1], instance=''),
                    value='200')
            ]),
    ]

    since = datetime.now() - timedelta(seconds=20)
    results = self.vim_client.query_stats(since, None)
    # 2 through QueryPerf + 2 through the pyVmomi Host object
    assert_that(len(results), equal_to(4))
def detect_stdportgroup(context, host_name, network_name):
    """Find the Standard Portgroup based on host and network name."""
    # Ensure the standard switch is available on the host
    names = set([host_name])

    # Use the vAPI to find the Host managed identities
    host_summaries = context.client.vcenter.Host.list(
        Host.FilterSpec(names=names))

    for host_summary in host_summaries:
        # Convert the host identifier into a ManagedObject
        host = host_summary.host
        host_mo = vim.HostSystem(host, context.soap_stub)

        for network_mo in host_mo.network:
            # Match on the exact vim.Network type: isinstance() would also
            # accept distributed portgroups, which subclass vim.Network.
            if (type(network_mo) == vim.Network and
                    network_mo.name == network_name):
                network = network_mo._moId
                print("Detected Standard Portgroup '{}' as {} on Host '{}' ({})"
                      .format(network_name, network, host_name, host))
                context.testbed.entities['HOST_STANDARD_SWITCH_IDS'][
                    host_name] = network
                return True

    print("Standard Portgroup '{}' missing on Host '{}'".format(
        network_name, host_name))
    return False
def add_host_to_vdswitch(context, vdswitch_name, host_name, pnic_names=None):
    """Add a host to a Distributed Switch."""
    host = context.testbed.entities['HOST_IDS'][host_name]
    host_mo = vim.HostSystem(host, context.soap_stub)

    vdswitch = context.testbed.entities['DISTRIBUTED_SWITCH_IDS'][
        vdswitch_name]
    vdswitch_mo = vim.DistributedVirtualSwitch(vdswitch, context.soap_stub)

    pnic_specs = []
    if pnic_names:
        for pnic in pnic_names:
            pnic_specs.append(vim.dvs.HostMember.PnicSpec(pnicDevice=pnic))

    dvs_member_config = vim.dvs.HostMember.ConfigSpec(
        operation="add",
        host=host_mo,
        backing=vim.dvs.HostMember.PnicBacking(pnicSpec=pnic_specs))

    dvs_config = vim.DistributedVirtualSwitch.ConfigSpec(
        configVersion=vdswitch_mo.config.configVersion,
        host=[dvs_member_config])

    task = vdswitch_mo.Reconfigure(dvs_config)
    pyVim.task.WaitForTask(task)

    print("Added Host '{}' ({}) to Distributed Switch '{}' ({})".format(
        host_name, host, vdswitch_name, vdswitch))
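# A hedged usage sketch for add_host_to_vdswitch() above. The testbed keys
# exist in the surrounding samples, but the concrete names and managed-object
# IDs below are illustrative assumptions only.
def example_add_host_to_vdswitch(context):
    # Assumed testbed contents (hypothetical values):
    #   context.testbed.entities['HOST_IDS']['esx-01.example.com'] = 'host-10'
    #   context.testbed.entities['DISTRIBUTED_SWITCH_IDS']['dvs-1'] = 'dvs-21'
    add_host_to_vdswitch(context, 'dvs-1', 'esx-01.example.com',
                         pnic_names=['vmnic1'])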
def detect_vmfs_datastore(context, host_name, datastore_name):
    """Find a VMFS datastore given host and datastore names."""
    names = set([host_name])

    # Use the vAPI to find the Host managed identities
    host_summaries = context.client.vcenter.Host.list(
        Host.FilterSpec(names=names))

    for host_summary in host_summaries:
        # Convert the host identifier into a ManagedObject
        host = host_summary.host
        host_mo = vim.HostSystem(host, context.soap_stub)

        for datastore_mo in host_mo.datastore:
            if (datastore_mo.name == datastore_name and
                    datastore_mo.summary.type == 'VMFS'):
                datastore = datastore_mo._moId
                print("Detected VMFS Volume '{}' as {} on Host '{}' ({})"
                      .format(datastore_name, datastore, host_name, host))
                context.testbed.entities['HOST_VMFS_DATASTORE_IDS'][
                    host_name] = datastore
                return True

    print("VMFS Volume '{}' missing on Host '{}'".format(
        datastore_name, host_name))
    return False
def cleanup_nfs_datastore(context):
    """Clean up the NFS datastore after running the vCenter samples."""
    # Remove the NFS datastore from each Host
    host1_name = context.testbed.config['ESX_HOST1']
    host2_name = context.testbed.config['ESX_HOST2']
    names = set([host1_name, host2_name])
    datastore_name = context.testbed.config['NFS_DATASTORE_NAME']

    # Use the vAPI to find the Host managed identities
    host_svc = Host(context.stub_config)
    host_summaries = host_svc.list(Host.FilterSpec(names=names))

    for host_summary in host_summaries:
        # Convert the host identifier into a ManagedObject
        host = host_summary.host
        host_mo = vim.HostSystem(host, context.soap_stub)

        for datastore_mo in host_mo.datastore:
            if datastore_mo.name == datastore_name:
                datastore_system = host_mo.configManager.datastoreSystem
                datastore_system.RemoveDatastore(datastore_mo)
                print("Removed NFS Volume '{}' ({}) from Host '{}' ({})"
                      .format(datastore_name, datastore_mo._moId,
                              host_mo.name, host_mo._moId))
def main():
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                          'user %s: ' % (args.host, args.user))

    # For Python 2.7.9 and later, the default SSL context uses stricter
    # connection handshaking rules. We may need to turn off hostname checking
    # and client-side certificate verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Disable the noisy InsecureRequestWarning message
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)
    atexit.register(Disconnect, si)

    # Detect whether the endpoint is a vCenter or an ESXi host
    aboutInfo = si.content.about
    if aboutInfo.apiType == 'VirtualCenter':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print('The Virtual Center with version %s (lower than 6.0) is '
                  'not supported.' % aboutInfo.apiVersion)
            return -1

        # Here is an example of how to access the VC-side VSAN API
        vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

        # Get the VSAN Stretched Cluster System
        vscs = vcMos['vsan-stretched-cluster-system']

        cluster = getClusterInstance(args.clusterName, si)
        if cluster is None:
            print("Cluster %s is not found for %s" % (args.clusterName,
                                                      args.host))
            return -1

        # Retrieve the Witness Host for the given VSAN Cluster
        witnessHosts = vscs.VSANVcGetWitnessHosts(cluster=cluster)
        print("\nVSAN Witness VM for %s" % args.clusterName)
        for witnessHost in witnessHosts:
            host = vim.HostSystem(witnessHost.host._moId, si._stub)
            print("Host: %s" % host.name)
            print("Node UUID: %s" % witnessHost.nodeUuid)
            print("Preferred Fault Domain: %s\n" % witnessHost.preferredFdName)
def setUp(self):
    self.mock_perf_mgr = MagicMock()
    self.mock_perf_mgr.perfCounter = fake_get_counters()

    self.coll = PerfManagerCollector()
    metric_names = [
        [],
        ["A.B", "C.D"],
        ["E.F", "G.H"],
        ["I.J", "I.K", "I.L"],
        ["M.N", "M.O", "M.O"]
    ]
    self.coll.metric_names = metric_names

    host = MagicMock(spec=vim.ManagedObject, key=vim.HostSystem("ha-host"))
    host.summary = MagicMock()
    host.summary.quickStats = MagicMock()
    host.summary.hardware = MagicMock()
    host.summary.quickStats.overallCpuUsage = MagicMock(return_value=100)
    host.summary.hardware.cpuMhz = MagicMock(return_value=2048)
    host.summary.hardware.numCpuCores = MagicMock(return_value=12)
    host.summary.quickStats.overallMemoryUsage = MagicMock(return_value=2500)
    host.summary.hardware.memorySize = MagicMock(return_value=4000)

    self.coll.get_perf_manager = MagicMock(return_value=self.mock_perf_mgr)
    self.coll.get_host_system = MagicMock(return_value=host)

    self.mock_vim = MagicMock()
    self.mock_vim.get_vms_in_cache.return_value = [
        MagicMock(name="fake-vm-id", project_id="p1", tenant_id="t1")]
    self.mock_vim.get_vm_obj_in_cache.return_value = vim.VirtualMachine('9')
    self.coll.get_vim_client = MagicMock(return_value=self.mock_vim)
def detect_nfs_datastore_on_host(context, host_name):
    """Find the NFS datastore on a host."""
    names = set([host_name])
    datastore_name = context.testbed.config['NFS_DATASTORE_NAME']

    # Use the vAPI to find the Host managed identities
    host_svc = Host(context.stub_config)
    host_summaries = host_svc.list(Host.FilterSpec(names=names))

    for host_summary in host_summaries:
        # Convert the host identifier into a ManagedObject
        host = host_summary.host
        host_mo = vim.HostSystem(host, context.soap_stub)

        for datastore_mo in host_mo.datastore:
            if (datastore_mo.name == datastore_name and
                    datastore_mo.summary.type == 'NFS'):
                datastore = datastore_mo._moId
                print("Detected NFS Volume '{}' as {} on Host '{}' ({})"
                      .format(datastore_name, datastore, host_name, host))
                context.testbed.entities['HOST_NFS_DATASTORE_IDS'][
                    host_name] = datastore
                return True

    print("NFS Volume '{}' missing on Host '{}'".format(
        datastore_name, host_name))
    return False
def test_do_a_vmotion(self, mock_sleep, mock_ping, mock_wait, mock_spec,
                      mock_host, mock_vm):
    """Verify that the proper mocked functions are called on a vMotion."""
    testvm = vim.VirtualMachine()
    testhost = vim.HostSystem()
    do_a_vmotion(testvm, testhost, '127.0.0.1')
    testvm.RelocateVM_Task.assert_called_once()
def isHostsConnected(cluster, witnessHosts, si):
    result = True
    for host in cluster.host:
        if host.summary.runtime.connectionState != 'connected':
            result = False
    for witnessHost in witnessHosts:
        host = vim.HostSystem(witnessHost.host._moId, si._stub)
        if host.summary.runtime.connectionState != 'connected':
            result = False
    return result
def test_fail_a_postvmotion(self, mock_sleep, mock_ping, mock_wait, mock_spec,
                            mock_host, mock_vm):
    """Verify the proper mocked functions are called when the post-vMotion
    ping fails."""
    testvm = vim.VirtualMachine()
    testhost = vim.HostSystem()
    mock_ping.side_effect = [True, False]
    with self.assertRaises(Exception,
                           msg="Exception not raised when ping fails"):
        do_a_vmotion(testvm, testhost, '127.0.0.1')
    testvm.RelocateVM_Task.assert_called_once()
def test_fail_a_prevmotion(self, mock_sleep, mock_ping, mock_wait, mock_spec,
                           mock_host, mock_vm):
    """Verify the proper mocked functions are called when the pre-vMotion
    ping fails."""
    testvm = vim.VirtualMachine()
    testhost = vim.HostSystem()
    mock_ping.return_value = False
    with self.assertRaises(Exception,
                           msg="Exception not raised when ping fails"):
        do_a_vmotion(testvm, testhost, '127.0.0.1')
    testvm.RelocateVM_Task.assert_not_called()
def test_get_perf_manager_stats(self):
    self.coll.initialize_host_counters()
    now = datetime.now()
    since = now - timedelta(seconds=20)
    self.mock_perf_mgr.QueryPerf.return_value = [
        vim.PerformanceManager.EntityMetricCSV(
            entity=vim.HostSystem('ha-host'),
            sampleInfoCSV='20,1970-01-01T00:00:10Z',
            value=[
                vim.PerformanceManager.MetricSeriesCSV(
                    id=vim.PerformanceManager.MetricId(counterId=6566,
                                                       instance=''),
                    value='200')]),
        vim.PerformanceManager.EntityMetricCSV(
            entity=vim.VirtualMachine('9'),
            sampleInfoCSV='20,1970-01-01T00:00:10Z',
            value=[
                vim.PerformanceManager.MetricSeriesCSV(
                    id=vim.PerformanceManager.MetricId(counterId=6566,
                                                       instance=''),
                    value='100')])
    ]

    results = self.coll.get_perf_manager_stats(since, now)
    assert_that(len(results.keys()), is_(2))
    assert_that(results, has_entries('vm.t1.p1.A.B', [(10.0, 100.0)],
                                     'A.B', [(10.0, 200.0)]))
    assert_that(self.mock_perf_mgr.QueryPerf.call_count, is_(1))

    expected_entity_refs = ["'vim.VirtualMachine:9'",
                            "'vim.HostSystem:ha-host'"]
    for i in range(len(expected_entity_refs)):
        # ref_str = expected_entity_refs[i]
        query_spec = self.mock_perf_mgr.QueryPerf.call_args[0][0][i]
        assert_that(query_spec,
                    instance_of(vim.PerformanceManager.QuerySpec))
        assert_that(query_spec.intervalId, is_(20))
        assert_that(query_spec.format, is_('csv'))
        assert_that(len(query_spec.metricId), is_(2))
        # assert_that(str(query_spec.entity), is_(ref_str))
        t_start = datetime.strptime(
            str(query_spec.startTime), '%Y-%m-%d %H:%M:%S.%f')
        t_end = datetime.strptime(
            str(query_spec.endTime), '%Y-%m-%d %H:%M:%S.%f')
        assert_that(t_end, equal_to(now))
        assert_that(t_start, equal_to(since))
def test_create_host_config_spec(self):
    with patch.object(vim.HostSystem, '__init__',
                      return_value=None) as mock_constructor:
        self.vim_obj = vim.HostSystem()
        self.assertTrue(mock_constructor.called)

    with patch.object(DVSAdapter, '_get_free_physical_nic',
                      return_value=['pnic1', 'pnic2']
                      ) as mock_get_free_physical_nic:
        self.dvs_config._create_host_config_spec([{'obj': self.vim_obj}],
                                                 'pnic_device')
        self.assertTrue(mock_get_free_physical_nic.called)
def setUp(self):
    self.vim_client = VimClient(auto_sync=False)
    self.vim_client._content = MagicMock()

    self.host = MagicMock(spec=vim.ManagedObject,
                          key=vim.HostSystem("ha-host"))
    self.host.summary = MagicMock()
    self.host.summary.quickStats = MagicMock()
    self.host.summary.hardware = MagicMock()
    self.host.summary.quickStats.overallCpuUsage = 1024
    self.host.summary.hardware.cpuMhz = 1024
    self.host.summary.hardware.numCpuCores = 2
    self.host.summary.quickStats.overallMemoryUsage = 2  # 2GB
    self.host.summary.hardware.memorySize = 4 * 1024 * 1024  # 4GB
    self.vim_client.host_system = MagicMock(return_value=self.host)
def setup_nfs_datastore_on_host(context, host_name):
    """Mount the NFS volume on an ESX host using the VIM API."""
    nfs_host = context.testbed.config['NFS_HOST']
    remote_path = context.testbed.config['NFS_REMOTE_PATH']
    local_path = context.testbed.config['NFS_DATASTORE_NAME']

    host = context.testbed.entities['HOST_IDS'][host_name]
    host_mo = vim.HostSystem(host, context.soap_stub)

    datastore_system = host_mo.configManager.datastoreSystem
    try:
        datastore_mo = datastore_system.CreateNasDatastore(
            vim.host.NasVolume.Specification(
                remoteHost=nfs_host,
                remotePath=remote_path,
                localPath=local_path,
                accessMode=vim.host.MountInfo.AccessMode.readWrite,
                type=vim.host.FileSystemVolume.FileSystemType.NFS))

        print("Added NFS Volume '{}' ({}) to Host '{}' ({})".format(
            local_path, datastore_mo._moId, host_name, host))
        return datastore_mo._moId
    except vim.fault.AlreadyExists:
        print("NFS Volume '{}' already exists on Host '{}' ({})".format(
            local_path, host_name, host))

        for datastore_mo in host_mo.datastore:
            info = datastore_mo.info
            if (isinstance(info, vim.host.NasDatastoreInfo) and
                    info.nas.remoteHost == nfs_host and
                    info.nas.remotePath == remote_path):
                if info.name == local_path:
                    print("Found NFS Volume '{}' ({}) on Host '{}' ({})"
                          .format(local_path, datastore_mo._moId,
                                  host_name, host_mo._moId))
                    return datastore_mo._moId

                print("Found NFS remote host '{}' and path '{}' on "
                      "Host '{}' ({}) as '{}'"
                      .format(nfs_host, remote_path, host_name,
                              host_mo._moId, info.name))
                print("Renaming NFS Volume '{}' ({}) to '{}'".format(
                    info.name, datastore_mo._moId, local_path))
                task = datastore_mo.Rename(local_path)
                pyVim.task.WaitForTask(task)
                # TODO Find the datastore identifier for the NFS volume
                # and return it
    return None
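# A minimal sketch of driving setup_nfs_datastore_on_host(). The config keys
# match the function above, but every value here is a made-up example.
def example_setup_nfs(context):
    context.testbed.config['NFS_HOST'] = 'nfs.example.com'       # hypothetical
    context.testbed.config['NFS_REMOTE_PATH'] = '/store1'        # hypothetical
    context.testbed.config['NFS_DATASTORE_NAME'] = 'Shared-NFS'  # hypothetical
    datastore_id = setup_nfs_datastore_on_host(context, 'esx-01.example.com')
    print("NFS datastore id:", datastore_id)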
def test_canary_test(self, mock_sleep, mock_vmotion, mock_fh, mock_go,
                     mock_vm, mock_si, mock_hs):
    """Verify that a canary test triggers the expected vMotions."""
    test_si = vim.ServiceInstance()
    test_vm = vim.VirtualMachine()
    mock_go.return_value = test_vm
    host = vim.HostSystem()
    host.name = 'Foo'
    mock_fh.return_value = host
    canary_test(test_si, ['host1', 'host2'], 'vmname', False)
    test_si.RetrieveContent.assert_called_once()
    self.assertEqual(mock_vmotion.call_count, 2,
                     "Two vmotions should occur")
def test_canary_test_vmname(self, mock_sleep, mock_vmotion, mock_fh, mock_go,
                            mock_vm, mock_si, mock_hs):
    """Given a canary VM name, the expected vMotions are triggered."""
    test_si = vim.ServiceInstance()
    test_vm = vim.VirtualMachine()
    test_vm.guest.ipAddress = None
    test_vm.name = "Bob"
    mock_go.return_value = test_vm
    host = vim.HostSystem()
    host.name = 'Foo'
    mock_fh.return_value = host
    canary_test(test_si, ['host1', 'host2'], test_vm.name, False)
    test_si.RetrieveContent.assert_called_once()
    self.assertEqual(mock_vmotion.call_count, 2,
                     "Two vmotions should occur")
    mock_vmotion.assert_called_with(test_vm, host, test_vm.name, False)
def query_performance(moid, histlevel, perftype):
    try:
        si = connect.Connect(vcenterip, vcenterport, vcenteruser, vcenterpass)
        content = si.RetrieveContent()
        perfManager = content.perfManager
        micpu = vim.PerformanceManager.MetricId(counterId=2, instance="")
        midisk1 = vim.PerformanceManager.MetricId(counterId=125, instance="")
        midisk2 = vim.PerformanceManager.MetricId(counterId=133, instance="")
        mimem = vim.PerformanceManager.MetricId(counterId=24, instance="")
        minet = vim.PerformanceManager.MetricId(counterId=143, instance="")
        #startTime = datetime.datetime.strptime(str(datetime.date.today() - datetime.timedelta(days=1)), '%Y-%m-%d').replace(tzinfo=pytz.timezone('Asia/Shanghai')).astimezone(pytz.utc)
        #endTime = datetime.datetime.strptime(str(datetime.date.today()), '%Y-%m-%d').replace(tzinfo=pytz.timezone('Asia/Shanghai')).astimezone(pytz.utc)
        endTime = datetime.datetime.now() - datetime.timedelta(minutes=10)

        # Pick the sampling interval for the requested history level; the
        # fallback branch uses the 20-second real-time interval with no
        # end time.
        intervalId = 20
        if histlevel == 'day':
            intervalId = 300
        elif histlevel == 'week':
            intervalId = 1800
        elif histlevel == 'month':
            intervalId = 7200
        else:
            intervalId = 20
            endTime = None

        if perftype == 'vm':
            vm = vim.VirtualMachine(moid)
            query = vim.PerformanceManager.QuerySpec(
                intervalId=intervalId,
                entity=vm,
                metricId=[micpu, midisk1, midisk2, mimem, minet],
                endTime=endTime)
        elif perftype == 'host':
            host = vim.HostSystem(moid)
            query = vim.PerformanceManager.QuerySpec(
                intervalId=intervalId,
                entity=host,
                metricId=[micpu, midisk1, midisk2, mimem, minet],
                endTime=endTime)
        perfResults = perfManager.QueryPerf(querySpec=[query])
        return perfResults
    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
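# A hedged example of consuming query_performance()'s return value. QueryPerf
# in the default ('normal') format returns EntityMetric objects whose series
# carry lists of integer samples; 'vm-42' is a made-up managed-object ID.
results = query_performance('vm-42', 'day', 'vm')
if results:
    for entity_metric in results:
        for series in entity_metric.value:
            # series.id.counterId maps back to the MetricIds requested above
            print(series.id.counterId, list(series.value))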
def test_canary_test_failure(self, mock_sleep, mock_vmotion, mock_fh, mock_go,
                             mock_vm, mock_si, mock_hs):
    """Verify that if an exception occurs, (a) the vMotion was requested
    and (b) the exception propagates."""
    test_si = vim.ServiceInstance()
    test_vm = vim.VirtualMachine()
    mock_go.return_value = test_vm
    host = vim.HostSystem()
    host.name = 'Foo'
    mock_fh.return_value = host
    canarytest.vsphere_tools.do_a_vmotion.side_effect = Exception(
        'vmotion raised an exception Failed')
    with self.assertRaises(Exception):
        canary_test(test_si, ['host1', 'host2'], 'vmname', False)
    test_si.RetrieveContent.assert_called_once()
    self.assertEqual(mock_vmotion.call_count, 1,
                     "One vmotion should occur, raising the exception")
def setup_vmfs_datastore(context, host_name, datastore_name):
    """Ensure a VMFS datastore with the given name exists on the host,
    renaming an existing VMFS volume if necessary."""
    context.testbed.entities['HOST_VMFS_DATASTORE_IDS'] = {}
    names = set([host_name])

    # Use the vAPI to find the Host managed identities
    host_svc = Host(context.stub_config)
    host_summaries = host_svc.list(Host.FilterSpec(names=names))
    host_summary = host_summaries[0]

    # Convert the host identifier into a ManagedObject
    host = host_summary.host
    host_mo = vim.HostSystem(host, context.soap_stub)

    vmfs_datastores = dict([(datastore_mo.name, datastore_mo)
                            for datastore_mo in host_mo.datastore
                            if datastore_mo.summary.type == 'VMFS'])

    # The VMFS volume exists. No need to do anything.
    if datastore_name in vmfs_datastores:
        datastore = vmfs_datastores[datastore_name]._moId
        print("Detected VMFS Volume '{}' as {} on Host '{}' ({})".format(
            datastore_name, datastore, host_name, host))
        context.testbed.entities['HOST_VMFS_DATASTORE_IDS'][host_name] \
            = datastore
        return True

    # Rename an existing VMFS datastore
    if len(vmfs_datastores) > 0:
        datastore_mo = list(vmfs_datastores.values())[0]
        datastore = datastore_mo._moId
        print("Renaming VMFS Volume '{}' ({}) on Host '{}' ({}) to '{}'"
              .format(datastore_mo.name, datastore, host_name, host,
                      datastore_name))
        task = datastore_mo.Rename(datastore_name)
        pyVim.task.WaitForTask(task)
        return True

    return False
def getInformations(witnessHosts, cluster, si):
    uuid = {}
    hostnames = {}
    disks = {}

    # Gather host and disk information
    for host in cluster.host:
        # Map the host id to the hostname
        hostnames[host.summary.host] = host.summary.config.name

        # Collect every disk (cache and capacity) attached to hosts in the
        # cluster that is in use by vSAN
        diskAll = host.configManager.vsanSystem.QueryDisksForVsan()
        for disk in diskAll:
            if disk.state == 'inUse':
                uuid[disk.vsanUuid] = disk.disk.canonicalName
                disks[disk.vsanUuid] = host.summary.config.name

    for vsanHostConfig in cluster.configurationEx.vsanHostConfig:
        uuid[vsanHostConfig.clusterInfo.nodeUuid] = hostnames[
            vsanHostConfig.hostSystem]

    # Gather witness disk information
    for witnessHost in witnessHosts:
        host = vim.HostSystem(witnessHost.host._moId, si._stub)
        uuid[witnessHost.nodeUuid] = host.name

        diskWitness = host.configManager.vsanSystem.QueryDisksForVsan()
        for disk in diskWitness:
            if disk.state == 'inUse':
                uuid[disk.vsanUuid] = disk.disk.canonicalName
                disks[disk.vsanUuid] = host.name

    return uuid, disks
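# A small sketch of consuming the maps returned by getInformations(): `uuid`
# translates vSAN node/disk UUIDs to display names, and `disks` records which
# host owns each disk. It assumes witnessHosts, cluster, and si come from the
# surrounding script.
uuid_map, disk_map = getInformations(witnessHosts, cluster, si)
for vsan_uuid, display_name in uuid_map.items():
    owner = disk_map.get(vsan_uuid, '<cluster node>')
    print('%s -> %s (on %s)' % (vsan_uuid, display_name, owner))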
def esx_perf_query(key, si):
    content = si.RetrieveContent()
    search_index = si.content.searchIndex
    #print(search_index)
    host = vim.HostSystem('host-27615')
    perfManager = content.perfManager
    metricId = vim.PerformanceManager.MetricId(counterId=6, instance="*")
    startTime = datetime.datetime.now() - datetime.timedelta(hours=1)
    endTime = datetime.datetime.now()
    query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                             entity=host,
                                             metricId=[metricId],
                                             startTime=startTime,
                                             endTime=endTime)
    print(perfManager.QueryPerf(querySpec=[query]))
    return
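# counterId=6 above is hardcoded, but counter IDs beyond a few well-known ones
# are not guaranteed to be stable across vCenter builds. A sketch of resolving
# a counter ID by its group/name/rollup triple instead, assuming `si` is a
# connected ServiceInstance (find_counter_id is a hypothetical helper, not
# part of pyVmomi):
def find_counter_id(si, group, name, rollup):
    # Walk the performance counter catalog and match group.name.rollup,
    # e.g. ('cpu', 'usage', 'average').
    for counter in si.RetrieveContent().perfManager.perfCounter:
        if (counter.groupInfo.key == group and
                counter.nameInfo.key == name and
                str(counter.rollupType) == rollup):
            return counter.key
    return None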
def test_retrieve_properties(self):
    """Test the retrieve_properties method."""
    content = Mock()
    content.rootFolder = vim.Folder('group-d1')
    objects_first_page = [
        vim.ObjectContent(obj=vim.Folder('group-d1')),
        vim.ObjectContent(obj=vim.ClusterComputeResource('domain-c1')),
    ]
    objects_second_page = [
        vim.ObjectContent(obj=vim.HostSystem('host-1')),
        vim.ObjectContent(obj=vim.VirtualMachine('vm-1')),
    ]
    content.propertyCollector.RetrievePropertiesEx(ANY).token = '1'
    content.propertyCollector.RetrievePropertiesEx(ANY).objects = \
        objects_first_page
    content.propertyCollector.ContinueRetrievePropertiesEx(ANY).token = \
        None
    content.propertyCollector.ContinueRetrievePropertiesEx(ANY).objects = \
        objects_second_page

    with patch.object(InspectTaskRunner, 'parse_parent_props') \
            as mock_parse_parent_props, \
            patch.object(InspectTaskRunner, 'parse_cluster_props') \
            as mock_parse_cluster_props, \
            patch.object(InspectTaskRunner, 'parse_host_props') \
            as mock_parse_host_props, \
            patch.object(InspectTaskRunner, 'parse_vm_props') \
            as mock_parse_vm_props:
        self.runner.retrieve_properties(content)
        mock_parse_parent_props.assert_called_with(ANY, ANY)
        mock_parse_cluster_props.assert_called_with(ANY, ANY)
        mock_parse_host_props.assert_called_with(ANY, ANY)
        mock_parse_vm_props.assert_called_with(ANY, ANY)
def move_host_into_cluster_vim(context, host_name, cluster_name):
    """Use the VIM API to move a host into another cluster."""
    TIMEOUT = 30  # sec

    host = context.testbed.entities['HOST_IDS'][host_name]
    host_mo = vim.HostSystem(host, context.soap_stub)

    # Put the host into maintenance mode before moving it
    if not host_mo.runtime.inMaintenanceMode:
        task = host_mo.EnterMaintenanceMode(TIMEOUT)
        pyVim.task.WaitForTask(task)
        print("Host '{}' ({}) in maintenance mode".format(host_name, host))

    cluster = context.testbed.entities['CLUSTER_IDS'][cluster_name]
    cluster_mo = vim.ClusterComputeResource(cluster, context.soap_stub)

    task = cluster_mo.MoveInto([host_mo])
    pyVim.task.WaitForTask(task)
    print("Host '{}' ({}) moved into Cluster '{}' ({})".format(
        host_name, host, cluster_name, cluster))

    task = host_mo.ExitMaintenanceMode(TIMEOUT)
    pyVim.task.WaitForTask(task)
    print("Host '{}' ({}) out of maintenance mode".format(host_name, host))
def getPerformance(args, tagsbase):
    result = ""

    # Don't check for a valid certificate
    context = ssl._create_unverified_context()

    si, content, cluster_obj = connectvCenter(args, context)

    # Disconnect from vCenter at exit
    atexit.register(Disconnect, si)

    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vcenter)
    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context,
                                      version=apiVersion)
    vsanVcStretchedClusterSystem = vcMos['vsan-stretched-cluster-system']
    vsanPerfSystem = vcMos['vsan-performance-manager']

    # Get VM uuid/names
    vms = getVMs(cluster_obj)

    # Get uuid/name mappings for hosts and disks
    uuid, disks = getInformations(content, cluster_obj)

    # Witness
    # Retrieve the Witness Host for the given VSAN Cluster
    witnessHosts = vsanVcStretchedClusterSystem.VSANVcGetWitnessHosts(
        cluster=cluster_obj)
    for witnessHost in witnessHosts:
        host = vim.HostSystem(witnessHost.host._moId, si._stub)
        uuid[witnessHost.nodeUuid] = host.name

        diskWitness = host.configManager.vsanSystem.QueryDisksForVsan()
        for disk in diskWitness:
            if disk.state == 'inUse':
                uuid[disk.vsanUuid] = disk.disk.canonicalName
                disks[disk.vsanUuid] = host.name

    # Gather a list of the available entity types (ex: vsan-host-net)
    entityTypes = vsanPerfSystem.VsanPerfGetSupportedEntityTypes()

    # Query interval: the last 10 minutes -- UTC!
    endTime = datetime.utcnow()
    startTime = endTime + timedelta(minutes=-10)

    splitSkipentitytypes = []
    if args.skipentitytypes:
        splitSkipentitytypes = args.skipentitytypes.split(',')

    for entities in entityTypes:
        if entities.name not in splitSkipentitytypes:
            entitieName = entities.name

            labels = []
            # Gather all labels related to the entity
            # (ex: iopsread, iopswrite...)
            for entity in entities.graphs:
                for metric in entity.metrics:
                    labels.append(metric.label)

            # Build the entity reference id
            entity = '%s:*' % entities.name

            # Build the query spec
            spec = vim.cluster.VsanPerfQuerySpec(
                endTime=endTime,
                entityRefId=entity,
                labels=labels,
                startTime=startTime)

            # Get the statistics
            try:
                metrics = vsanPerfSystem.VsanPerfQueryPerf(
                    querySpecs=[spec],
                    cluster=cluster_obj)
            except vmodl.fault.InvalidArgument as e:
                print("Caught InvalidArgument exception : " + str(e))
                return -1
            except vmodl.fault.NotFound as e:
                print("Caught NotFound exception : " + str(e))
                return -1
            except vmodl.fault.NotSupported as e:
                print("Caught NotSupported exception : " + str(e))
                return -1
            except vmodl.fault.RuntimeFault as e:
                print("Caught RuntimeFault exception : " + str(e))
                return -1
            except vmodl.fault.Timedout as e:
                print("Caught Timedout exception : " + str(e))
                return -1
            except vmodl.fault.VsanNodeNotMaster as e:
                print("Caught VsanNodeNotMaster exception : " + str(e))
                return -1

            for metric in metrics:
                if metric.sampleInfo != "":
                    measurement = entitieName
                    sampleInfos = metric.sampleInfo.split(",")
                    lenValues = len(sampleInfos)
                    timestamp = convertStrToTimestamp(
                        sampleInfos[lenValues - 1])
                    tags = parseEntityRefId(measurement, metric.entityRefId,
                                            uuid, vms, disks)
                    tags.update(tagsbase)
                    fields = {}
                    for value in metric.value:
                        listValue = value.values.split(",")
                        fields[value.metricId.label] = float(
                            listValue[lenValues - 1])
                    result = result + formatInfluxLineProtocol(
                        measurement, tags, fields, timestamp)
    print(result)
def run(self, ids=None, names=None, datastores=None, datastore_clusters=None,
        resource_pools=None, vapps=None, hosts=None, folders=None,
        clusters=None, datacenters=None, virtual_switches=None,
        no_recursion=False, vsphere=None):
    # TODO: food for thought. PowerCLI contains additional parameters
    # that are not present here for the following reasons:
    # <server> - we may need to bring it in if we decide to have
    #            connections to more than 1 VC.
    # <tag>    - Tags in VC are not the same as the tags you see in the
    #            Web Client, because those tags are stored in the Inventory
    #            Service only. PowerCLI somehow can access them; from the
    #            vSphere SDK there is no way.
    self.establish_connection(vsphere)

    props = ['config.guestFullName', 'name', 'runtime.powerState']
    moid_to_vm = {}

    # getting vms by their ids
    vms_from_vmids = []
    if ids:
        vms_from_vmids = [
            vim.VirtualMachine(moid, stub=self.si._stub) for moid in ids
        ]
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm,
                                                    vms_from_vmids)

    # getting vms by their names
    vms_from_names = []
    if names:
        container = self.si_content.viewManager.CreateContainerView(
            self.si_content.rootFolder, [vim.VirtualMachine], True)
        for vm in container.view:
            if vm.name in names:
                vms_from_names.append(vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm,
                                                    vms_from_names)

    # getting vms from datastore objects
    vms_from_datastores = []
    if datastores:
        vim_datastores = [
            vim.Datastore(moid, stub=self.si._stub) for moid in datastores
        ]
        for ds in vim_datastores:
            vms_from_datastores.extend(ds.vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(moid_to_vm,
                                                    vms_from_datastores)

    # getting vms from datastore cluster objects
    vms_from_datastore_clusters = []
    if datastore_clusters:
        vim_datastore_clusters = [
            vim.StoragePod(moid, stub=self.si._stub)
            for moid in datastore_clusters
        ]
        for ds_cl in vim_datastore_clusters:
            for ds in ds_cl.childEntity:
                vms_from_datastore_clusters.extend(ds.vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(
        moid_to_vm, vms_from_datastore_clusters)

    # getting vms from virtual switch objects
    vms_from_virtual_switches = []
    if virtual_switches:
        vim_virtual_switches = [
            vim.DistributedVirtualSwitch(moid, stub=self.si._stub)
            for moid in virtual_switches
        ]
        for vswitch in vim_virtual_switches:
            for pg in vswitch.portgroup:
                vms_from_virtual_switches.extend(pg.vm)
    GetVMs.__add_vm_properties_to_map_from_vm_array(
        moid_to_vm, vms_from_virtual_switches)

    # getting vms from containers (location param)
    vms_from_containers = []
    containers = []
    if resource_pools:
        containers += [
            vim.ResourcePool(moid, stub=self.si._stub)
            for moid in resource_pools
        ]
    if vapps:
        containers += [
            vim.VirtualApp(moid, stub=self.si._stub) for moid in vapps
        ]
    if hosts:
        containers += [
            vim.HostSystem(moid, stub=self.si._stub) for moid in hosts
        ]
    if folders:
        containers += [
            vim.Folder(moid, stub=self.si._stub) for moid in folders
        ]
    if clusters:
        containers += [
            vim.ComputeResource(moid, stub=self.si._stub)
            for moid in clusters
        ]
    if datacenters:
        containers += [
            vim.Datacenter(moid, stub=self.si._stub)
            for moid in datacenters
        ]

    for cont in containers:
        objView = self.si_content.viewManager.CreateContainerView(
            cont, [vim.VirtualMachine], not no_recursion)
        tSpec = vim.PropertyCollector.TraversalSpec(
            name='tSpecName', path='view', skip=False,
            type=vim.view.ContainerView)
        pSpec = vim.PropertyCollector.PropertySpec(
            all=False, pathSet=props, type=vim.VirtualMachine)
        oSpec = vim.PropertyCollector.ObjectSpec(
            obj=objView, selectSet=[tSpec], skip=False)
        pfSpec = vim.PropertyCollector.FilterSpec(
            objectSet=[oSpec], propSet=[pSpec],
            reportMissingObjectsInResults=False)
        retOptions = vim.PropertyCollector.RetrieveOptions()
        retProps = self.si_content.propertyCollector.RetrievePropertiesEx(
            specSet=[pfSpec], options=retOptions)
        vms_from_containers += retProps.objects
        while retProps.token:
            retProps = self.si_content.propertyCollector.\
                ContinueRetrievePropertiesEx(token=retProps.token)
            vms_from_containers += retProps.objects
        objView.Destroy()

    for vm in vms_from_containers:
        if vm.obj._GetMoId() not in moid_to_vm:
            moid_to_vm[vm.obj._GetMoId()] = {
                "moid": vm.obj._GetMoId(),
                "name": vm.propSet[1].val,
                "os": vm.propSet[0].val,
                "runtime.powerState": vm.propSet[2].val
            }
    return moid_to_vm.values()
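# The container-view / PropertyCollector sequence above is the standard
# pyVmomi bulk-retrieval pattern. For contrast, a condensed sketch that skips
# the PropertyCollector and reads one property per VM through the view
# (simpler, but one server round-trip per attribute access); it assumes a
# connected ServiceInstance `si`.
def list_vm_names(si):
    content = si.RetrieveContent()
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.VirtualMachine], True)
    try:
        return [vm.name for vm in view.view]
    finally:
        view.Destroy()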
def test_parse_vm_props(self, mock_dt):
    """Test the parse_vm_props method."""
    mock_dt.utcnow.return_value = datetime(2000, 1, 1, 4, 20)
    ip_addresses, mac_addresses = ['1.2.3.4'], ['00:50:56:9e:09:8c']
    facts = {
        "name": "vm1",
        "guest.net": "",  # mac/ip addr returned by get_nics
        "summary.runtime.powerState": "poweredOn",
        "summary.guest.hostName": "hostname",
        "summary.config.guestFullName": "Red Hat 7",
        "summary.config.memorySizeMB": 1024,
        "summary.config.numCpu": 4,
        "summary.config.uuid": "1111",
        "runtime.host": "",
        "config.template": False,
    }
    # runs through host_facts values
    host_facts = {
        'host.name': 'host1',
        'host.cpu_cores': 12,
        'host.cpu_count': 2,
        'host.cpu_threads': 24,
        'host.cluster': 'cluster1',
        'host.datacenter': 'dc1',
    }
    host = vim.HostSystem('host-1')
    host_dict = {}
    host_dict[str(host)] = host_facts

    props = []
    for key in facts:
        prop = Mock()
        prop.name, prop.val = key, facts[key]
        if key == 'runtime.host':
            prop.val = host
        props.append(prop)

    self.scan_task.update_stats('TEST_VC.', sys_count=5, sys_failed=0,
                                sys_scanned=0, sys_unreachable=0)

    with patch('scanner.vcenter.inspect.get_nics',
               return_value=(mac_addresses, ip_addresses)):
        self.runner.parse_vm_props(props, host_dict)

    inspect_result = self.scan_task.inspection_result
    sys_results = inspect_result.systems.all()

    expected_facts = {
        "vm.cluster": "cluster1",
        "vm.cpu_count": 4,
        "vm.datacenter": "dc1",
        "vm.dns_name": "hostname",
        "vm.host.cpu_cores": 12,
        "vm.host.cpu_count": 2,
        "vm.host.cpu_threads": 24,
        "vm.host.name": "host1",
        "vm.ip_addresses": ["1.2.3.4"],
        "vm.is_template": False,
        "vm.mac_addresses": ["00:50:56:9e:09:8c"],
        "vm.memory_size": 1,
        "vm.name": "vm1",
        "vm.os": "Red Hat 7",
        "vm.state": "poweredOn",
        "vm.last_check_in": "2000-01-01 04:20:00",
        "vm.uuid": "1111",
    }
    sys_fact = {}
    for raw_fact in sys_results.first().facts.all():
        # Must read as JSON as this is what task.py does
        sys_fact[raw_fact.name] = json.loads(raw_fact.value)

    self.assertEqual(1, len(sys_results))
    self.assertEqual('vm1', sys_results.first().name)
    self.assertEqual(expected_facts, sys_fact)
def main():
    # Don't check for a valid certificate
    context = ssl._create_unverified_context()

    # Parse CLI arguments
    args = get_args()

    # Connect to vCenter
    try:
        si = SmartConnect(host=args.vcenter,
                          user=args.user,
                          pwd=args.password,
                          port=int(args.port),
                          sslContext=context)
        if not si:
            print("Could not connect to the specified host using specified "
                  "username and password")
            return -1
    except vmodl.MethodFault as e:
        print("Caught vmodl fault : " + e.msg)
        return -1
    except Exception as e:
        print("Caught exception : " + str(e))
        return -1

    # Disconnect from vCenter at exit
    atexit.register(Disconnect, si)

    # Get content information
    content = si.RetrieveContent()

    # Get info about the cluster
    cluster_obj = getClusterInstance(args.clusterName, content)

    # Exit if the cluster provided in the arguments is not available
    if not cluster_obj:
        print('The required cluster not found in inventory, validate input.')
        exit()

    # Initialize tags with the vCenter and cluster names
    tagsbase = {}
    tagsbase['vcenter'] = args.vcenter
    tagsbase['cluster'] = args.clusterName

    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vcenter)
    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context,
                                      version=apiVersion)

    ## CAPACITY
    if args.capacity:
        vsanSpaceReportSystem = vcMos['vsan-cluster-space-report-system']
        try:
            spaceReport = vsanSpaceReportSystem.VsanQuerySpaceUsage(
                cluster=cluster_obj)
        except vmodl.fault.InvalidArgument as e:
            print("Caught InvalidArgument exception : " + str(e))
            return -1
        except vmodl.fault.NotSupported as e:
            print("Caught NotSupported exception : " + str(e))
            return -1
        except vmodl.fault.RuntimeFault as e:
            print("Caught RuntimeFault exception : " + str(e))
            return -1

        timestamp = int(time.time() * 1000000000)

        parseCapacity('global', spaceReport, tagsbase, timestamp)
        parseCapacity('summary', spaceReport, tagsbase, timestamp)

        if spaceReport.efficientCapacity:
            parseCapacity('efficientcapacity', spaceReport, tagsbase,
                          timestamp)

        for object in spaceReport.spaceDetail.spaceUsageByObjectType:
            parseCapacity(object.objType, object, tagsbase, timestamp)

    ## PERFORMANCE
    if args.performance:
        # Get VM uuid/names
        vms = getVMs(content)

        # Get disk uuid/names and host uuid/names
        diskinfos, hostinfos = getHostsInfos(cluster_obj)

        #### Witness
        vsanVcStretchedClusterSystem = vcMos['vsan-stretched-cluster-system']

        # Retrieve the Witness Host for the given VSAN Cluster
        witnessHosts = vsanVcStretchedClusterSystem.VSANVcGetWitnessHosts(
            cluster=cluster_obj)
        for witnessHost in witnessHosts:
            host = vim.HostSystem(witnessHost.host._moId, si._stub)
            hostinfos[witnessHost.nodeUuid] = host.name

            diskWitness = host.configManager.vsanSystem.QueryDisksForVsan()
            for disk in diskWitness:
                if disk.state == 'inUse':
                    diskinfos[disk.vsanUuid] = disk.disk.canonicalName

        vsanPerfSystem = vcMos['vsan-performance-manager']

        # Gather a list of the available entity types (ex: vsan-host-net)
        entityTypes = vsanPerfSystem.VsanPerfGetSupportedEntityTypes()

        # Query interval: the last 10 minutes
        endTime = datetime.utcnow()
        startTime = endTime + timedelta(minutes=-10)

        splitSkipentitytypes = []
        if args.skipentitytypes:
            splitSkipentitytypes = args.skipentitytypes.split(',')

        for entities in entityTypes:
            if entities.name not in splitSkipentitytypes:
                entitieName = entities.name

                labels = []
                # Gather all labels related to the entity
                # (ex: iopsread, iopswrite...)
                for entity in entities.graphs:
                    for metric in entity.metrics:
                        labels.append(metric.label)

                # Build the entity reference id
                entity = '%s:*' % entities.name

                # Build the query spec
                spec = vim.cluster.VsanPerfQuerySpec(endTime=endTime,
                                                     entityRefId=entity,
                                                     labels=labels,
                                                     startTime=startTime)

                # Get the statistics
                try:
                    metrics = vsanPerfSystem.VsanPerfQueryPerf(
                        querySpecs=[spec], cluster=cluster_obj)
                except vmodl.fault.InvalidArgument as e:
                    print("Caught InvalidArgument exception : " + str(e))
                    return -1
                except vmodl.fault.NotFound as e:
                    print("Caught NotFound exception : " + str(e))
                    return -1
                except vmodl.fault.NotSupported as e:
                    print("Caught NotSupported exception : " + str(e))
                    return -1
                except vmodl.fault.RuntimeFault as e:
                    print("Caught RuntimeFault exception : " + str(e))
                    return -1
                except vmodl.fault.Timedout as e:
                    print("Caught Timedout exception : " + str(e))
                    return -1
                except vmodl.fault.VsanNodeNotMaster as e:
                    print("Caught VsanNodeNotMaster exception : " + str(e))
                    return -1

                for metric in metrics:
                    if metric.sampleInfo != "":
                        measurement = entitieName
                        sampleInfos = metric.sampleInfo.split(",")
                        lenValues = len(sampleInfos)
                        timestamp = convertStrToTimestamp(
                            sampleInfos[lenValues - 1])
                        tags = parseEntityRefId(measurement,
                                                metric.entityRefId,
                                                hostinfos, vms, diskinfos)
                        tags.update(tagsbase)
                        fields = {}
                        for value in metric.value:
                            listValue = value.values.split(",")
                            fields[value.metricId.label] = float(
                                listValue[lenValues - 1])
                        printInfluxLineProtocol(measurement, tags, fields,
                                                timestamp)
    return 0