from pyVim.connect import SmartConnectNoSSL
from pyVmomi import vim


class VmManage(object):
    def __init__(self, host, user, password, port, ssl):
        self.host = host
        self.user = user
        self.pwd = password
        self.port = port
        self.sslContext = ssl
        try:
            # Use the configured port instead of hard-coding 443.
            self.client = SmartConnectNoSSL(host=host, user=user,
                                            pwd=password, port=port)
            self.content = self.client.RetrieveContent()
            self.result = True
        except Exception as e:
            self.result = False
            self.message = e

    def _get_all_objs(self, obj_type, folder=None):
        """Return all objects of the given type, optionally scoped to a folder."""
        if folder is None:
            container = self.content.viewManager.CreateContainerView(
                self.content.rootFolder, obj_type, True)
        else:
            container = self.content.viewManager.CreateContainerView(
                folder, obj_type, True)
        return container.view

    def _get_obj(self, obj_type, name):
        """Return the specific object of the given type with the given name."""
        obj = None
        content = self.client.RetrieveContent()
        container = content.viewManager.CreateContainerView(
            content.rootFolder, obj_type, True)
        for c in container.view:
            if c.name == name:
                obj = c
                break
        return obj

    def get_datacenters(self):
        """Return all datacenters."""
        return self._get_all_objs([vim.Datacenter])

    def get_datacenter_by_name(self, datacenter_name):
        """Return the datacenter object matching the given name."""
        # _get_obj (not _get_all_objs) is the name-based lookup.
        return self._get_obj([vim.Datacenter], datacenter_name)
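# A minimal usage sketch (hypothetical host and credentials; assumes the
# imports above and a reachable vCenter):
if __name__ == '__main__':
    manager = VmManage(host='vcenter.example.com',
                       user='administrator@vsphere.local',
                       password='secret', port=443, ssl=None)
    if manager.result:
        for dc in manager.get_datacenters():
            print(dc.name)
    else:
        print('Connection failed: %s' % manager.message)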
def main():
    args = get_args()
    si = None

    # Connect to the host without SSL certificate verification.
    try:
        si = SmartConnectNoSSL(host=args.host,
                               user=args.user,
                               pwd=args.password,
                               port=int(args.port))
        atexit.register(Disconnect, si)
    except IOError:
        pass

    if not si:
        raise SystemExit("Unable to connect to host with supplied info.")

    vm_obj = get_obj(si.content, [vim.VirtualMachine], args.vmname)
    if vm_obj is None:
        raise SystemExit("Unable to locate VirtualMachine.")

    if args.action == "create":
        if args.name is None:
            print("The snapshot name must be specified: -n <snapshot_name> "
                  "or -name <snapshot_name>")
        else:
            create_snapshot(si, vm_obj, args.name, args.description,
                            args.memory, args.quiesce)
    elif args.action == "list_all":
        view_all_snapshot(vm_obj)
    elif args.action == "list_current":
        view_current_snapshot(vm_obj)
    elif args.action == "delete":
        if args.snapshotname is None:
            print("Please specify the snapshot that you want to delete "
                  "with parameter -snapshotname")
            exit(1)
        delete_snapshot(si, vm_obj, args.snapshotname,
                        args.child_snapshot_delete)
    elif args.action == "revert":
        if args.snapshotname is None:
            print("Please specify the snapshot that you want to revert to "
                  "with parameter -snapshotname")
            exit(1)
        revert_snapshot(si, vm_obj, args.snapshotname)
    elif args.action == "delete_all":
        print("Removing all snapshots for virtual machine %s" % vm_obj.name)
        task = vm_obj.RemoveAllSnapshots()
        wait_for_tasks(si, [task])
        print("All snapshots of the VM {} are removed".format(vm_obj.name))
    else:
        print("Invalid Operation")

    del vm_obj
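# create_snapshot() is called by main() above but not defined in this snippet;
# a minimal sketch, assuming the standard pyVmomi snapshot task API and the
# same wait_for_tasks() helper used in the delete_all branch:
def create_snapshot(si, vm_obj, name, description, memory, quiesce):
    task = vm_obj.CreateSnapshot_Task(name=name, description=description,
                                      memory=memory, quiesce=quiesce)
    wait_for_tasks(si, [task])
    print("Snapshot {} created for VM {}".format(name, vm_obj.name))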
def main():
    """Iterate through all datacenters and list VM info."""
    args = GetArgs()
    outputjson = True if args.json else False
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    si = SmartConnectNoSSL(host=args.host,
                           user=args.user,
                           pwd=password,
                           port=int(args.port))
    if not si:
        print("Could not connect to the specified host using specified "
              "username and password")
        return -1
    atexit.register(Disconnect, si)

    content = si.RetrieveContent()
    children = content.rootFolder.childEntity
    for child in children:  # Iterate through datacenters
        dc = child
        data[dc.name] = {}  # Add datacenters to the (module-level) data dict
        clusters = dc.hostFolder.childEntity
        for cluster in clusters:  # Iterate through the clusters in the DC
            data[dc.name][cluster.name] = {}  # Add clusters to data dict
            hosts = cluster.host  # Variable to make pep8 compliance
            for host in hosts:  # Iterate through hosts in the cluster
                hostname = host.summary.config.name
                # Add VMs to data dict by config name
                data[dc.name][cluster.name][hostname] = {}
                vms = host.vm
                for vm in vms:  # Iterate through each VM on the host
                    vmname = vm.summary.config.name
                    data[dc.name][cluster.name][hostname][vmname] = {}
                    summary = vmsummary(vm.summary, vm.guest)
                    vm2dict(dc.name, cluster.name, hostname, vm, summary)
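# vm2dict() and vmsummary() are helpers referenced above but not shown in this
# snippet. A minimal sketch of vm2dict, assuming it simply stores the per-VM
# summary dict under the nested module-level `data` structure built in main():
def vm2dict(datacenter, cluster, host, vm, summary):
    vmname = vm.summary.config.name
    data[datacenter][cluster][host][vmname] = summary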
def vm_beat(host, user, pwd, port):
    try:
        # Connect without SSL certificate verification.
        si = SmartConnectNoSSL(host=host, user=user, pwd=pwd, port=port)
        atexit.register(Disconnect, si)
        content = si.RetrieveContent()

        # Multi-dimensional data has to be collected from different views.
        view = pchelper.get_container_view(si, obj_type=[vim.VirtualMachine])
        vm_data = pchelper.collect_properties(si, view_ref=view,
                                              obj_type=vim.VirtualMachine,
                                              include_mors=True)  # path_set=vm_properties
        obj = content.viewManager.CreateContainerView(content.rootFolder,
                                                      [vim.VirtualMachine],
                                                      True)

        # Fill a dict with per-VM information.
        info = {}
        for vm in obj.view:
            info[vm.name] = {}
            info[vm.name]['cpu_GHz'] = \
                vm.summary.quickStats.staticCpuEntitlement / 1000.0  # MHz -> GHz
            info[vm.name]['os'] = vm.summary.config.guestFullName
            info[vm.name]['vm_tools_status'] = vm.summary.guest.toolsStatus
            info[vm.name]['ip'] = vm.summary.guest.ipAddress
            for dev in vm.config.hardware.device:
                if isinstance(dev, vim.vm.device.VirtualEthernetCard):
                    info[vm.name]['mac'] = dev.macAddress

        for vm in vm_data:
            # Sum the capacity of all disks attached to this VM.
            disk_total = 0
            for device in vm["config"].hardware.device:
                if isinstance(device, vim.vm.device.VirtualDisk):
                    # deviceInfo.summary looks like "41,943,040 KB".
                    disk_total += int(
                        device.deviceInfo.summary[0:-2].replace(",", ""))
            info[vm["name"]]["disk_GB"] = disk_total / 1024 ** 2  # KB -> GB
            info[vm["name"]]["memory_GB"] = \
                vm["config"].hardware.memoryMB / 1024  # MB -> GB
            info[vm["name"]]["cpu_num"] = vm["config"].hardware.numCPU
            # TODO: for VMs with two MAC addresses, determine which NIC
            # to report and how to assign it.
            # overallCpuUsage = 139 (MHz); guestMemoryUsage = 327 is the
            # memory-usage metric.
    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
        return False, error.msg
    return info
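# Example invocation (hypothetical host and credentials); vm_beat() returns a
# dict keyed by VM name on success:
if __name__ == '__main__':
    inventory = vm_beat('vcenter.example.com', 'administrator@vsphere.local',
                        'secret', 443)
    if isinstance(inventory, dict):
        for name, facts in inventory.items():
            print(name, facts)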
def connect_vcenter(self):
    self.si = None
    try:
        self.si = SmartConnectNoSSL(host=self.host,
                                    user=self.username,
                                    pwd=self.pwd,
                                    port=443)
        atexit.register(Disconnect, self.si)
    except vim.fault.InvalidLogin:
        raise SystemExit("Unable to connect to host "
                         "with supplied credentials.")
    return self.si
def vc_connect(window):
    window.si = None
    try:
        window.si = SmartConnectNoSSL(host=window.host_text.text().strip(),
                                      user=window.username_text.text().strip(),
                                      pwd=window.password_text.text())
        atexit.register(Disconnect, window.si)
    except vim.fault.InvalidLogin:
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Information)
        msg.setText("Unable to connect to host"
                    " with supplied credentials.")
        msg.setWindowTitle("Error")
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msg.exec_()
        return
    except socket.error:
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Information)
        msg.setText("Unable to connect to host")
        msg.setWindowTitle("Error")
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msg.exec_()
        return
    except Exception as e:
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Warning)
        msg.setText(str(e))  # setText() expects a string, not an exception
        msg.setWindowTitle("Error")
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msg.exec_()
        return

    # render clone_vm window
    vc_main_gui.render_main_gui(window)
def main():
    args = setup_args()

    # Disable SSL certificate verification, since most customers aren't going
    # to set their NSX Manager up with a trusted CA.
    if (not os.environ.get('PYTHONHTTPSVERIFY', '')
            and getattr(ssl, '_create_unverified_context', None)):
        ssl._create_default_https_context = ssl._create_unverified_context

    # Set up common variables.
    credstring = args.nsx_manager_username + ":" + args.nsx_manager_password
    creds = base64.b64encode(credstring.encode()).decode('ascii')
    headers = {
        'Content-Type': 'application/xml',
        'Authorization': 'Basic ' + creds
    }
    nsx_manager_address = args.nsx_manager_address
    nsx_license_key = args.key

    # Connect to vCenter via SOAP.
    try:
        si = SmartConnectNoSSL(host=args.host,
                               user=args.user,
                               pwd=args.password,
                               port=args.port)
        atexit.register(Disconnect, si)
    except Exception:
        print("Unable to connect to %s" % args.host)
        return 1

    if args.datacenter:
        dc = get_dc(si, args.datacenter)
    else:
        dc = si.content.rootFolder.childEntity[0]

    # This is a testing statement to be removed later; it just proves we
    # successfully pulled something from vCenter via SOAP.
    print("Datacenter in use:")
    print(dc)

    # Set up some more variables that require a vCenter connection to
    # figure out.
    dvs_moref = str(get_dvs_moref(dc, args.DVS)).replace(
        'vim.dvs.VmwareDistributedVirtualSwitch:', '')
    dvs_moref = dvs_moref.replace("'", "")

    cluster_moref_list = []
    cluster_prep_list = args.cluster_prep_list.split(",")
    for c in cluster_prep_list:
        cluster_id = str(get_cluster_moref(dc, c)).replace(
            'vim.ClusterComputeResource:', '')
        cluster_id = cluster_id.replace("'", "")
        cluster_moref_list.append(cluster_id)

    # Start the actual work here.
    # Register with SSO.
    register_sso_status = register_nsx_with_lookup_service(
        headers, nsx_manager_address, args.lookup_service_address,
        args.user, args.password)
    print(*register_sso_status)

    # Register with vCenter.
    register_vcenter_status = register_nsx_with_vcenter(
        headers, nsx_manager_address, args.host, args.user, args.password)
    print(*register_vcenter_status)

    # Add the NSX license key and assign it to the NSX solution.
    add_nsx_license_key(dc, si, nsx_license_key)

    # Set the segment ID range.
    segment_id_status = set_segment_id_range(headers, nsx_manager_address)
    print(*segment_id_status)

    # Create the IP pool VTEP-Pool. Hard-set num_hosts to 2 for now, until a
    # function exists to figure that out.
    num_hosts = 2
    vtep_ip_pool_status = create_vtep_ip_pool(
        nsx_manager_address, headers, args.VTEP_IP_Range, args.VTEP_Mask,
        args.VTEP_Gateway, num_hosts, args.VTEP_DNS, args.VTEP_domain)
    vtep_ip_pool_id = vtep_ip_pool_status[1]
    print(*vtep_ip_pool_status)
    print(vtep_ip_pool_id)

    # Create the IP pool Controller-Pool.
    controller_ip_pool_status = create_controller_ip_pool(
        nsx_manager_address, headers, args.Controller_IP_Range,
        args.Controller_Mask, args.Controller_Gateway,
        args.Controller_DNS, args.Controller_domain)
    controller_ip_pool_id = controller_ip_pool_status[1]
    print(*controller_ip_pool_status)
    print(controller_ip_pool_id)

    # Deploy three NSX controllers.
    nsx_controller_status = deploy_nsx_controllers(
        headers, nsx_manager_address, args.Controller_Cluster,
        args.Controller_Datastores, args.Controller_Network,
        args.Controller_Password, controller_ip_pool_id, dc, si)
    print(*nsx_controller_status)

    # Prepare the specified clusters for DFW.
    prepare_clusters_for_dfw_status = prepare_clusters_for_dfw(
        headers, nsx_manager_address, cluster_moref_list)
    print(*prepare_clusters_for_dfw_status)

    # Prepare the specified clusters for VXLAN.
    prepare_clusters_for_vxlan_status = prepare_clusters_for_vxlan(
        headers, nsx_manager_address, cluster_moref_list, dvs_moref,
        args.VTEP_VLAN_ID, vtep_ip_pool_id)
    print(*prepare_clusters_for_vxlan_status)

    # Create transport zone "Primary".
    create_transport_zone_status = create_transport_zone(
        headers, nsx_manager_address, cluster_moref_list)
    print(*create_transport_zone_status)
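# get_dvs_moref() and get_cluster_moref() are referenced above but not defined
# in this snippet. A minimal sketch of the former, assuming it returns the DVS
# managed object itself (whose str() form is what main() strips the type
# prefix from):
def get_dvs_moref(dc, dvs_name):
    for net in dc.networkFolder.childEntity:
        if isinstance(net, vim.DistributedVirtualSwitch) and net.name == dvs_name:
            return net
    return None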
def main():
    args = setup_args()
    try:
        si = SmartConnectNoSSL(host=args.host,
                               user=args.user,
                               pwd=args.password,
                               port=args.port)
        atexit.register(Disconnect, si)
    except Exception:
        print("Unable to connect to %s" % args.host)
        return 1

    if args.datacenter:
        dc = get_dc(si, args.datacenter)
    else:
        dc = si.content.rootFolder.childEntity[0]

    if args.datastore:
        ds = get_ds(dc, args.datastore)
    else:
        ds = get_largest_free_ds(dc)

    ovf_handle = OvfHandler(args.ova_path)
    ovfManager = si.content.ovfManager

    propertyMappingDict = {
        'vsm_cli_passwd_0': args.vsm_cli_passwd_0,
        'vsm_cli_en_passwd_0': args.vsm_cli_en_passwd_0,
        'vsm_hostname': args.vsm_hostname,
        'vsm_ip_0': args.vsm_ip_0,
        'vsm_netmask_0': args.vsm_netmask_0,
        'vsm_gateway_0': args.vsm_gateway_0,
        'vsm_ntp_0': args.vsm_ntp_0,
        'vsm_dns1_0': args.vsm_dns1_0
    }
    mapping = [vim.KeyValue(key=k, value=v)
               for k, v in propertyMappingDict.items()]

    network = get_network(si, dc, args.map_eth0_to_network)
    cluster_rp = get_cluster(si, dc, args.cluster)
    network_map = vim.OvfManager.NetworkMapping()
    network_map.name = 'Management Network'
    network_map.network = network

    vmname = 'HCI-NSX-Manager-1'
    cisp = vim.OvfManager.CreateImportSpecParams(propertyMapping=mapping,
                                                 entityName=vmname)
    cisp.networkMapping.append(network_map)
    cisr = ovfManager.CreateImportSpec(ovf_handle.get_descriptor(),
                                       cluster_rp, ds, cisp)

    # These errors might be handleable by supporting the parameters in
    # CreateImportSpecParams.
    if len(cisr.error):
        print("The following errors will prevent import of this OVA:")
        for error in cisr.error:
            print("%s" % error)
        return 1

    ovf_handle.set_spec(cisr)
    lease = cluster_rp.ImportVApp(cisr.importSpec, dc.vmFolder)
    while lease.state == vim.HttpNfcLease.State.initializing:
        print("Waiting for lease to be ready...")
        time.sleep(1)
    if lease.state == vim.HttpNfcLease.State.error:
        print("Lease error: %s" % lease.error)
        return 1
    if lease.state == vim.HttpNfcLease.State.done:
        return 0

    print("Starting deploy...")
    ovf_handle.upload_disks(lease, args.host)

    # Wait a little bit, then try to power the NSX Manager on.
    time.sleep(60)
    vmnames = [vmname]  # a list, so the membership test matches exact names
    content = si.content
    objView = content.viewManager.CreateContainerView(content.rootFolder,
                                                      [vim.VirtualMachine],
                                                      True)
    vmList = objView.view
    objView.Destroy()
    tasks = [vm.PowerOn() for vm in vmList if vm.name in vmnames]
    print("NSX Manager appliance is deployed. Please wait 10-15 minutes "
          "before running the configure_nsx_manager.py script, as it can "
          "take a while for the services to fully start.")
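# get_largest_free_ds() is referenced above but not defined in this snippet;
# a minimal sketch, assuming it picks the datastore with the most free space
# in the datacenter:
def get_largest_free_ds(dc):
    largest, largest_free = None, 0
    for ds in dc.datastore:
        free = ds.summary.freeSpace
        if free > largest_free:
            largest, largest_free = ds, free
    return largest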
def run(host, user, pwd, port):
    try:
        si = SmartConnectNoSSL(host=host, user=user, pwd=pwd, port=port)
        atexit.register(Disconnect, si)
        content = si.RetrieveContent()
        datacenter_info_list = []
        for datacenter in content.rootFolder.childEntity:
            D = {'name': datacenter.name}
            if hasattr(datacenter.hostFolder, 'childEntity'):
                hostFolder = datacenter.hostFolder
                computeResourceList = []
                computeResourceList = getComputeResource(hostFolder,
                                                         computeResourceList)
                for computeResource in computeResourceList:
                    # Only collect cluster resources.
                    if isinstance(computeResource, vim.ClusterComputeResource):
                        D['cl_name'] = computeResource.name
                        D['cl_cpu_capacity'] = 0
                        D['cl_cpu_used'] = 0
                        D['cl_mem_capacity'] = 0
                        D['cl_mem_used'] = 0
                        D['cl_storage_capacity'] = 0
                        D['cl_storage_used'] = 0
                        for host in computeResource.host:
                            D['cl_cpu_capacity'] += format_size(
                                host.summary.hardware.cpuMhz *
                                host.summary.hardware.numCpuCores,
                                1000, 2)  # MHz -> GHz
                            D['cl_cpu_used'] += format_size(
                                host.summary.quickStats.overallCpuUsage,
                                1000, 2)  # MHz -> GHz
                            D['cl_mem_capacity'] += format_size(
                                host.summary.hardware.memorySize,
                                1024 ** 3, 2)  # bytes -> GB
                            D['cl_mem_used'] += format_size(
                                host.summary.quickStats.overallMemoryUsage,
                                1000, 2)  # MB -> GB
                        # Work out the storage situation per datacenter, over
                        # all datastores of this cluster.
                        for ds in computeResource.datastore:
                            D['cl_storage_capacity'] += format_size(
                                ds.summary.capacity, 1024 ** 4, 2)  # TB
                            D['cl_storage_used'] += format_size(
                                ds.summary.freeSpace, 1024 ** 4, 2)  # TB
                        # cl_storage_used accumulated free space above;
                        # convert it to used space here.
                        D['cl_storage_used'] = (D['cl_storage_capacity'] -
                                                D['cl_storage_used'])
            datacenter_info_list.append(D)
        print(datacenter_info_list)
        return datacenter_info_list  # was `return None`, discarding the result
    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
        return False, error.msg
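# format_size() is used by run() above but not defined in this snippet; a
# minimal sketch consistent with its call sites (a raw value divided by a
# unit divisor and rounded to the given precision):
def format_size(value, unit, precision=2):
    return round(float(value) / unit, precision)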
import argparse

from pyVim.connect import SmartConnectNoSSL
from pyVmomi import vim


def get_obj(content, vimtype, name):
    obj = None
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    for c in container.view:
        if c.name == name:
            obj = c
            break
    return obj


parser = argparse.ArgumentParser()
parser.add_argument('--u')
parser.add_argument('--p')
parser.add_argument('--vc')
parser.add_argument('--vm')
parser.add_argument('--vmu')
parser.add_argument('--vmp')
args = parser.parse_args()

vc_port = 443  # port should be an int, not the string '443'

# This will connect us to vCenter
s = SmartConnectNoSSL(host=args.vc, user=args.u, pwd=args.p, port=vc_port)

# Establish the vCenter Obj
content = s.RetrieveContent()

# Assemble Creds
creds = vim.vm.guest.NamePasswordAuthentication(username=args.vmu,
                                                password=args.vmp)

# Lookup VM by Name
vm_obj = get_obj(content, [vim.VirtualMachine], args.vm)

# Establish ProcessMgr Obj
pm = content.guestOperationsManager.processManager

# Grab Processes from machine
pl = pm.ListProcessesInGuest(vm_obj, creds)
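# Example: print the returned guest processes (attribute names are from the
# standard vim.vm.guest.ProcessManager.ProcessInfo data object):
for proc in pl:
    print("%s (pid %s) owner=%s" % (proc.name, proc.pid, proc.owner))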
def run(host, user, pwd, port):
    try:
        si = SmartConnectNoSSL(host=host, user=user, pwd=pwd, port=port)
        atexit.register(Disconnect, si)
        content = si.RetrieveContent()
        esxi_list_info = []
        for datacenter in content.rootFolder.childEntity:
            if hasattr(datacenter.hostFolder, 'childEntity'):
                hostFolder = datacenter.hostFolder
                computeResourceList = []
                computeResourceList = getComputeResource(hostFolder,
                                                         computeResourceList)
                for computeResource in computeResourceList:
                    for host in computeResource.host:
                        esxi_list_info.append({
                            'name': host.name,  # hostname
                            'type': host.summary.hardware.vendor + ' ' +
                                    host.summary.hardware.model,  # host model
                            'version': host.summary.config.product.fullName,  # hypervisor version
                            'ip': host.name,  # management IP address
                            'logic_core': host.summary.hardware.numCpuThreads,  # logical CPU core count
                            'cpu_type': host.summary.hardware.cpuModel,  # CPU model
                            'cpu_freq': host.summary.hardware.cpuMhz *
                                        host.summary.hardware.numCpuCores / 1000.00,  # CPU frequency, GHz
                            'mem': '%.2f' % (host.summary.hardware.memorySize / 1024.0 ** 3)  # memory, GB
                            # host.summary.quickStats holds brief usage
                            # metrics; e.g. overallCpuUsage / 1000.0 is the
                            # CPU frequency in use, in GHz
                        })

        ds_list = []
        ds_obj_list = get_obj(content, [vim.Datastore])
        for ds in ds_obj_list:
            ds_list.append({
                'name': ds.name,
                'related': [],
                'capacity': ds.summary.capacity / 1024.0 ** 3
            })
            for host in ds.host:
                ds_list[-1]['related'].append(host.key.name)
        print(ds_list)

        # Total up the datastore capacity associated with each ESXi host.
        for esxi in esxi_list_info:
            esxi['capacity'] = 0.00
            for ds in ds_list:
                for i in ds['related']:  # match this ESXi among the DS's hosts
                    if i == esxi['name']:
                        esxi['capacity'] += ds['capacity']  # accumulate mounted capacity
                        break
        return esxi_list_info
    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
        return False, error.msg
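# getComputeResource() is used by run() above (and by the cluster-capacity
# run() earlier) but not defined in this snippet; a minimal sketch, assuming
# it recursively walks host folders collecting ComputeResource objects:
def getComputeResource(entity, compute_resource_list):
    if isinstance(entity, vim.ComputeResource):
        compute_resource_list.append(entity)
    elif hasattr(entity, 'childEntity'):
        for child in entity.childEntity:
            getComputeResource(child, compute_resource_list)
    return compute_resource_list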
def main():
    args = setup_args()
    try:
        si = SmartConnectNoSSL(host=args.host,
                               user=args.user,
                               pwd=args.password,
                               port=args.port)
        atexit.register(Disconnect, si)
    except Exception:
        print("Unable to connect to %s" % args.host)
        return 1

    # Obtain DVS, cluster, and DC information and set up variables.
    if args.datacenter:
        dc = get_dc(si, args.datacenter)
    else:
        dc = si.content.rootFolder.childEntity[0]

    dvswitchinfo = list_dvswitches(si)
    number_of_dvswitches = dvswitchinfo[0]
    dvswitchname = dvswitchinfo[1]
    clusterinfo = list_clusters(si)
    number_of_clusters = clusterinfo[0]
    clustername = clusterinfo[1]
    network_folder = dc.networkFolder

    # Check whether there is more than one DVS; if so, cancel execution,
    # because this is not a fresh environment out of NDE.
    if number_of_dvswitches > 1:
        print("More than one Distributed Virtual Switch is detected in this "
              "environment. This script is meant to be run immediately after "
              "NDE. Exiting...")
        return 1

    # Check whether there is more than one cluster; if so, cancel execution,
    # because this is not a fresh environment out of NDE.
    if number_of_clusters > 1:
        print("More than one Cluster is detected in this environment. This "
              "script is meant to be run immediately after NDE. Exiting...")
        return 1

    # Build a dictionary of the objects for all the port groups.
    portgroup_info = list_portgroups_initial(si)
    portgroup_moref_dict = portgroup_info[0]
    portgroup_name_flag = portgroup_info[1]

    # A nonzero portgroup_name_flag means list_portgroups() found a portgroup
    # name that shouldn't exist; again, not a fresh-from-NDE setup.
    if portgroup_name_flag == 1:
        print("Found a portgroup name that should not exist. This script is "
              "meant to be run immediately after NDE. Exiting...")
        print("Offending Portgroup: " + portgroup_info[2])
        return 1

    # Get the VLAN ID from iSCSI-A.
    vlan_id_from_iscsi_a = obtain_vlan_id_from_portgroup(
        portgroup_moref_dict.get("iSCSI-A"))

    # Temporarily rename the iSCSI port groups on the management DVS.
    temporary_rename_of_iscsi_portgroups(si)

    # Get the VLAN ID from vMotion.
    vlan_id_from_vmotion = obtain_vlan_id_from_portgroup(
        portgroup_moref_dict.get("vMotion"))

    # Temporarily rename the vMotion port group on the management DVS.
    temporary_rename_of_vmotion_portgroup(si)

    # Get the VLAN ID from VM_Network.
    vlan_id_from_vm = obtain_vlan_id_from_portgroup(
        portgroup_moref_dict.get("VM_Network"))

    # Temporarily rename the VM_Network port group on the management DVS.
    temporary_rename_of_vm_portgroup(si)

    # Get the VLAN IDs from the C&C port groups.
    vlan_id_from_Management_Network = obtain_vlan_id_from_portgroup(
        portgroup_moref_dict.get("Management Network"))
    vlan_id_from_HCI_Internal_vCenter_Network = obtain_vlan_id_from_portgroup(
        portgroup_moref_dict.get("HCI_Internal_vCenter_Network"))
    vlan_id_from_HCI_Internal_mNode_Network = obtain_vlan_id_from_portgroup(
        portgroup_moref_dict.get("HCI_Internal_mNode_Network"))
    vlan_id_from_HCI_Internal_OTS_Network = obtain_vlan_id_from_portgroup(
        portgroup_moref_dict.get("HCI_Internal_OTS_Network"))

    # Temporarily rename the C&C port groups on the management DVS.
    temporary_rename_of_cc_portgroups(si)

    # Create a DVS called "NetApp HCI Management" and attach it to the cluster.
    management_dvswitch_object = create_dvSwitch(si, network_folder,
                                                 clusterinfo[2],
                                                 "NetApp HCI Management")

    # Create a DVS called "NetApp HCI Storage" and attach it to the cluster.
    storage_dvswitch_object = create_dvSwitch(si, network_folder,
                                              clusterinfo[2],
                                              "NetApp HCI Storage")

    # Create a DVS called "NetApp HCI Compute" and attach it to the cluster.
    compute_dvswitch_object = create_dvSwitch(si, network_folder,
                                              clusterinfo[2],
                                              "NetApp HCI Compute")

    # Rename the uplink portgroups.
    rename_uplink_portgroups(si)

    # Add iSCSI-A and iSCSI-B to the storage DVS.
    add_dvPort_group(si, storage_dvswitch_object, "iSCSI-A",
                     vlan_id_from_iscsi_a)
    add_dvPort_group(si, storage_dvswitch_object, "iSCSI-B",
                     vlan_id_from_iscsi_a)

    # Add vMotion to the compute DVS.
    add_dvPort_group(si, compute_dvswitch_object, "vMotion",
                     vlan_id_from_vmotion)

    # Add VM_Network to the compute DVS.
    add_dvPort_group(si, compute_dvswitch_object, "VM_Network",
                     vlan_id_from_vm)

    # Add Management Network to the management DVS.
    add_dvPort_group(si, management_dvswitch_object, "Management Network",
                     vlan_id_from_Management_Network)

    # Add the C&C port groups.
    add_dvPort_group(si, management_dvswitch_object,
                     "HCI_Internal_vCenter_Network",
                     vlan_id_from_HCI_Internal_vCenter_Network)
    add_dvPort_group(si, management_dvswitch_object,
                     "HCI_Internal_OTS_Network",
                     vlan_id_from_HCI_Internal_OTS_Network)
    add_dvPort_group(si, management_dvswitch_object,
                     "HCI_Internal_mNode_Network",
                     vlan_id_from_HCI_Internal_mNode_Network)

    # Now it's time to move the VMkernel IPs over to the new port groups.
    # Move vmnic3 to the new management DVS.
    content = si.RetrieveContent()
    source_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI VDS")
    target_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI Management")
    for entity in dc.hostFolder.childEntity:
        for host in entity.host:
            print("Migrating vmnic3 on host:", host.name)
            unassign_pnic_list = []
            s = str(host.config.network.proxySwitch)
            result = find_pnic_spec(s)  # get one or more pnic specs
            for g in result:
                v = get_vmnic(g)
                x = v.split(':')
                if x[0] != "vmnic3":
                    unassign_pnic_list.append(v)
            unassign_pnic(source_dvswitch, host, unassign_pnic_list)
            time.sleep(10)
            assign_pnic_list = ["vmnic3"]
            assign_pnic(target_dvswitch, host, assign_pnic_list)
            time.sleep(10)

    # Relocate the C&C VMs.
    list_of_vms_to_relocate = [
        "NetApp-Management-Node", "vCenter-Server-Appliance",
        "File Services powered by ONTAP-01"
    ]
    for vmname in list_of_vms_to_relocate:
        vm = get_obj(content, [vim.VirtualMachine], vmname)
        vmtype = str(type(vm))
        if vmtype == "<class 'pyVmomi.VmomiSupport.vim.VirtualMachine'>" \
                and vmname == "vCenter-Server-Appliance":
            network = get_obj(content, [vim.DistributedVirtualPortgroup],
                              "HCI_Internal_vCenter_Network")
            move_vm(vm, network)
            print("Successfully moved", vmname, "to new Management DVS")
        if vmtype == "<class 'pyVmomi.VmomiSupport.vim.VirtualMachine'>" \
                and vmname == "NetApp-Management-Node":
            network = get_obj(content, [vim.DistributedVirtualPortgroup],
                              "HCI_Internal_mNode_Network")
            move_vm(vm, network)
            print("Successfully moved", vmname, "to new Management DVS")
        if vmtype == "<class 'pyVmomi.VmomiSupport.vim.VirtualMachine'>" \
                and vmname == "File Services powered by ONTAP-01":
            network = get_obj(content, [vim.DistributedVirtualPortgroup],
                              "HCI_Internal_OTS_Network")
            move_vm(vm, network)
            print("Successfully moved", vmname, "to new Management DVS")
        time.sleep(5)

    # Migrate vmnic2 / vmk0 to the management DVS.
    target_portgroup = get_obj(content, [vim.DistributedVirtualPortgroup],
                               "Management Network")
    for entity in dc.hostFolder.childEntity:
        for host in entity.host:
            print("Migrating vmnic2 / vmk0 on host:", host.name)
            migrate_vmk(host, target_portgroup, target_dvswitch, "vmk0")
            time.sleep(10)
            unassign_pnic_list = []
            s = str(host.config.network.proxySwitch)
            result = find_pnic_spec(s)  # get one or more pnic specs
            for g in result:
                v = get_vmnic(g)
                x = v.split(':')
                if x[0] != "vmnic2" and x[0] != "vmnic3":
                    unassign_pnic_list.append(v)
            unassign_pnic(source_dvswitch, host, unassign_pnic_list)
            time.sleep(10)
            assign_pnic_list = ["vmnic2", "vmnic3"]
            assign_pnic(target_dvswitch, host, assign_pnic_list)
            time.sleep(10)

    # Move vmnic5 and its associated vmk to the storage DVS.
    source_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI VDS")
    target_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI Storage")
    target_portgroup = get_obj(content, [vim.DistributedVirtualPortgroup],
                               "iSCSI-A")
    for entity in dc.hostFolder.childEntity:
        for host in entity.host:
            print("Migrating vmnic5 / vmk1 on host:", host.name)
            migrate_vmk(host, target_portgroup, target_dvswitch, "vmk1")
            time.sleep(10)
            unassign_pnic_list = []
            s = str(host.config.network.proxySwitch)
            result = find_pnic_spec(s)  # get one or more pnic specs
            for g in result:
                v = get_vmnic(g)
                x = v.split(':')
                if x[0] != "vmnic5" and x[0] != "vmnic2" and x[0] != "vmnic3":
                    unassign_pnic_list.append(v)
            unassign_pnic(source_dvswitch, host, unassign_pnic_list)
            time.sleep(10)
            assign_pnic_list = ["vmnic5"]
            assign_pnic(target_dvswitch, host, assign_pnic_list)
            time.sleep(10)

    # Move vmnic1 and its associated vmk to the storage DVS.
    source_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI VDS")
    target_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI Storage")
    target_portgroup = get_obj(content, [vim.DistributedVirtualPortgroup],
                               "iSCSI-B")
    for entity in dc.hostFolder.childEntity:
        for host in entity.host:
            print("Migrating vmnic1 / vmk2 on host:", host.name)
            migrate_vmk(host, target_portgroup, target_dvswitch, "vmk2")
            time.sleep(10)
            unassign_pnic_list = []
            s = str(host.config.network.proxySwitch)
            result = find_pnic_spec(s)  # get one or more pnic specs
            for g in result:
                v = get_vmnic(g)
                x = v.split(':')
                if (x[0] != "vmnic1" and x[0] != "vmnic5" and
                        x[0] != "vmnic2" and x[0] != "vmnic3"):
                    unassign_pnic_list.append(v)
            unassign_pnic(source_dvswitch, host, unassign_pnic_list)
            time.sleep(10)
            assign_pnic_list = ["vmnic5", "vmnic1"]
            assign_pnic(target_dvswitch, host, assign_pnic_list)
            time.sleep(10)

    # Move vmnic0/vmnic4 and the vMotion vmk to the compute DVS.
    source_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI VDS")
    target_dvswitch = get_obj(content, [vim.DistributedVirtualSwitch],
                              "NetApp HCI Compute")
    target_portgroup = get_obj(content, [vim.DistributedVirtualPortgroup],
                               "vMotion")
    for entity in dc.hostFolder.childEntity:
        for host in entity.host:
            print("Migrating vmnic0 / vmnic4 / vmk3 on host:", host.name)
            migrate_vmk(host, target_portgroup, target_dvswitch, "vmk3")
            time.sleep(10)
            unassign_pnic_list = []
            unassign_pnic(source_dvswitch, host, unassign_pnic_list)
            time.sleep(10)
            assign_pnic_list = ["vmnic0", "vmnic4"]
            assign_pnic(target_dvswitch, host, assign_pnic_list)
            time.sleep(10)

    # Clean up the old port groups (currently disabled):
    # list_of_pgs_to_delete = ["iSCSI-A_1", "iSCSI-B_1", "vMotion_1",
    #                          "VM_Network_1", "HCI_Internal_vCenter_Network_1",
    #                          "HCI_Internal_mNode_Network_1",
    #                          "HCI_Internal_OTS_Network_1",
    #                          "Management Network_1"]
    # for pgname in list_of_pgs_to_delete:
    #     pg = get_obj(content, [vim.DistributedVirtualPortgroup], pgname)
    #     delete_portgroup(pg)
    #     print("Deleted portgroup", pgname)

    delete_dvs(dvswitchinfo[2])
    print("DVS reconfiguration complete.")
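# delete_dvs() is called above but not defined in this snippet; a minimal
# sketch, assuming dvswitchinfo[2] is the old switch's managed object, so the
# standard ManagedEntity Destroy_Task applies:
def delete_dvs(dvswitch_obj):
    task = dvswitch_obj.Destroy_Task()
    # A production version would wait for the task to complete.
    return task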