def erase_partition(self, host, device_path):
    """
    Erase the partitions on the disk

    Args:
        host (vim.HostSystem): Host instance
        device_path (str): Device path of the disk whose partitions will be erased,
            e.g. "/vmfs/devices/disks/naa.910229801b540c0125ef160f3048faba"

    """
    # Set an empty partition spec; applying it wipes the existing partition table
    spec = vim.HostDiskPartitionSpec()
    host.configManager.storageSystem.UpdateDiskPartitions(device_path, spec)
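
# The helpers below are referenced by main() but are not defined in this section.
# These are minimal sketches of the assumed behavior (a yes/no prompt and
# human-readable sizes), not necessarily the original implementations.
def yes(message):
    # Keep asking until the user answers 'yes' or 'no'; return True for 'yes'.
    while True:
        answer = raw_input(message).strip().lower()
        if answer in ('yes', 'no'):
            return answer == 'yes'


def sizeof_fmt(num):
    # Render a byte count as a human-readable string,
    # e.g. 107374182400 -> '100.0GB'.
    for unit in ('bytes', 'KB', 'MB', 'GB', 'TB'):
        if abs(num) < 1024.0:
            return '%3.1f%s' % (num, unit)
        num /= 1024.0
    return '%3.1f%s' % (num, 'PB')
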
def main():
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules. We may need to turn off the hostname
    # checking and client-side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    cluster = getClusterInstance(args.clusterName, si)

    if args.vsanlicense:
        print 'Assign VSAN license'
        lm = si.content.licenseManager
        lam = lm.licenseAssignmentManager
        lam.UpdateAssignedLicense(entity=cluster._moId,
                                  licenseKey=args.vsanlicense)

    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)
    vsanClusterSystem = vcMos['vsan-cluster-config-system']
    vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system']
    isallFlash = args.allflash

    print 'Enable VSAN with {} mode'.format(
        'all flash' if isallFlash else 'hybrid')

    hostProps = CollectMultiple(
        si.content, cluster.host,
        ['name', 'configManager.vsanSystem', 'configManager.storageSystem'])
    hosts = hostProps.keys()

    for host in hosts:
        disks = [
            result.disk for result in hostProps[host]
            ['configManager.vsanSystem'].QueryDisksForVsan()
            if result.state == 'ineligible'
        ]
        print 'Found ineligible disks {} in host {}'.format(
            [disk.displayName for disk in disks], hostProps[host]['name'])
        for disk in disks:
            if yes('Do you want to wipe disk {}?\nPlease always check the '
                   'partition table and the data stored on those disks before '
                   'doing any wipe! (yes/no)?'.format(disk.displayName)):
                hostProps[host][
                    'configManager.storageSystem'].UpdateDiskPartitions(
                        disk.deviceName, vim.HostDiskPartitionSpec())

    tasks = []
    configInfo = vim.VsanHostConfigInfo(
        networkInfo=vim.VsanHostConfigInfoNetworkInfo(port=[
            vim.VsanHostConfigInfoNetworkInfoPortConfig(
                device=args.vmknic,
                ipConfig=vim.VsanHostIpConfig(upstreamIpAddress='224.1.2.3',
                                              downstreamIpAddress='224.2.3.4'))
        ]))
    for host in hosts:
        print 'Enable VSAN traffic in host {} with {}'.format(
            hostProps[host]['name'], args.vmknic)
        task = hostProps[host]['configManager.vsanSystem'].UpdateVsan_Task(
            configInfo)
        tasks.append(task)
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print 'Enable VSAN by claiming disks manually'
    # Build vsanReconfigSpec step by step; it only takes effect after the
    # VsanClusterReconfig method is called.
    vsanReconfigSpec = vim.VimVsanReconfigSpec(
        modify=True,
        vsanClusterConfig=vim.VsanClusterConfigInfo(
            enabled=True,
            defaultConfig=vim.VsanClusterConfigInfoHostDefaultInfo(
                autoClaimStorage=False)))

    if isallFlash:
        print 'Enable deduplication and compression for VSAN'
        vsanReconfigSpec.dataEfficiencyConfig = vim.VsanDataEfficiencyConfig(
            compressionEnabled=True, dedupEnabled=True)

    if args.faultdomains:
        print 'Add fault domains in vsan'
        faultDomains = []
        # args.faultdomains is a string like 'f1:host1,host2 f2:host3,host4'
        for faultdomain in args.faultdomains.split():
            fname, hostnames = faultdomain.split(':')
            domainSpec = vim.cluster.VsanFaultDomainSpec(
                name=fname,
                hosts=[
                    host for host in hosts
                    if hostProps[host]['name'] in hostnames.split(',')
                ])
            faultDomains.append(domainSpec)
        vsanReconfigSpec.faultDomainsSpec = vim.VimClusterVsanFaultDomainsConfigSpec(
            faultDomains=faultDomains)

    task = vsanClusterSystem.VsanClusterReconfig(cluster, vsanReconfigSpec)
    vsanapiutils.WaitForTasks([task], si)

    diskmap = {host: {'cache': [], 'capacity': []} for host in hosts}
    cacheDisks = []
    capacityDisks = []

    if isallFlash:
        # Get the eligible SSDs from each host; the smallest SSDs become cache
        # disks and the remaining SSDs become capacity disks.
        for host in hosts:
            ssds = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible' and result.disk.ssd
            ]
            smallerSize = min([
                disk.capacity.block * disk.capacity.blockSize for disk in ssds
            ])
            for ssd in ssds:
                size = ssd.capacity.block * ssd.capacity.blockSize
                if size == smallerSize:
                    diskmap[host]['cache'].append(ssd)
                    cacheDisks.append((ssd.displayName, sizeof_fmt(size),
                                       hostProps[host]['name']))
                else:
                    diskmap[host]['capacity'].append(ssd)
                    capacityDisks.append((ssd.displayName, sizeof_fmt(size),
                                          hostProps[host]['name']))
    else:
        # In hybrid mode the eligible SSDs become cache disks and the eligible
        # HDDs become capacity disks.
        for host in hosts:
            disks = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible'
            ]
            ssds = [disk for disk in disks if disk.ssd]
            hdds = [disk for disk in disks if not disk.ssd]

            for disk in ssds:
                diskmap[host]['cache'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                cacheDisks.append((disk.displayName, sizeof_fmt(size),
                                   hostProps[host]['name']))
            for disk in hdds:
                diskmap[host]['capacity'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                capacityDisks.append((disk.displayName, sizeof_fmt(size),
                                      hostProps[host]['name']))

    print 'Claim these disks as cache disks'
    for disk in cacheDisks:
        print 'Name:{}, Size:{}, Host:{}'.format(disk[0], disk[1], disk[2])

    print 'Claim these disks as capacity disks'
    for disk in capacityDisks:
        print 'Name:{}, Size:{}, Host:{}'.format(disk[0], disk[1], disk[2])

    for host, disks in diskmap.iteritems():
        if disks['cache'] and disks['capacity']:
            dm = vim.VimVsanHostDiskMappingCreationSpec(
                cacheDisks=disks['cache'],
                capacityDisks=disks['capacity'],
                creationType='allFlash' if isallFlash else 'hybrid',
                host=host)
            task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm)
            tasks.append(task)

    print 'Wait for the disk group creation tasks to finish'
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print 'Display disk groups in each host'
    for host in hosts:
        diskMaps = vsanVcDiskManagementSystem.QueryDiskMappings(host)
        for index, diskMap in enumerate(diskMaps, 1):
            print 'Host:{}, DiskGroup:{}, Cache Disks:{}, Capacity Disks:{}'.format(
                hostProps[host]['name'], index,
                diskMap.mapping.ssd.displayName,
                [disk.displayName for disk in diskMap.mapping.nonSsd])

    print 'Enable perf service on this cluster'
    vsanPerfSystem = vcMos['vsan-performance-manager']
    task = vsanPerfSystem.CreateStatsObjectTask(cluster)
    vsanapiutils.WaitForTasks([task], si)
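
# getClusterInstance() is called by main() but not shown in this section; below
# is a minimal sketch under the assumption that it looks the cluster up by name
# across all datacenters. The __main__ guard is the usual entry point for this
# kind of sample script (GetArgs and CollectMultiple are likewise expected to
# be defined elsewhere in the sample).
def getClusterInstance(clusterName, serviceInstance):
    content = serviceInstance.RetrieveContent()
    searchIndex = content.searchIndex
    for datacenter in content.rootFolder.childEntity:
        cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName)
        if cluster is not None:
            return cluster
    return None


if __name__ == '__main__':
    main()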