Example No. 1
def main():
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For Python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules. We may need to turn off hostname checking
    # and client-side certificate verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Disabling the annoying InsecureRequestWarning message
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    #for detecting whether the host is VC or ESXi
    aboutInfo = si.content.about

    if aboutInfo.apiType == 'VirtualCenter':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print(
                'vCenter Server version %s (lower than 6.0) is not supported.'
                % aboutInfo.apiVersion)
            return -1

        # Create vSphere Datacenter
        folder = si.content.rootFolder

        dc_moref = get_obj(si.content, [vim.Datacenter], args.datacenterName)
        if not dc_moref:
            print("Creating vSphere Datacenter: %s" % args.datacenterName)
            dc_moref = folder.CreateDatacenter(name=args.datacenterName)

        # Create vSphere Cluster
        host_folder = dc_moref.hostFolder
        cluster_spec = vim.cluster.ConfigSpecEx()
        drs_config = vim.cluster.DrsConfigInfo()
        drs_config.enabled = True
        cluster_spec.drsConfig = drs_config
        vsan_config = vim.vsan.cluster.ConfigInfo()
        vsan_config.enabled = True
        cluster_spec.vsanConfig = vsan_config
        print("Creating vSphere Cluster: %s" % args.clusterName)
        cluster = host_folder.CreateClusterEx(name=args.clusterName,
                                              spec=cluster_spec)

        #Here is an example of how to access VC side VSAN Health Service API
        vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

        # Get VSAN Cluster Config System
        vccs = vcMos['vsan-cluster-config-system']

        #cluster = getClusterInstance(args.clusterName, si)

        if cluster is None:
            print("Cluster %s is not found for %s" %
                  (args.clusterName, args.host))
            return -1

        vsanCluster = vccs.VsanClusterGetConfig(cluster=cluster)

        # Check whether Dedupe & Compression are already enabled; if not, enable them
        if (not vsanCluster.dataEfficiencyConfig.compressionEnabled
                or not vsanCluster.dataEfficiencyConfig.dedupEnabled):
            print(
                "Enabling Compression/Dedupe capability on vSphere Cluster: %s"
                % args.clusterName)
            # Create new VSAN Reconfig Spec, both Compression/Dedupe must be enabled together
            vsanSpec = vim.VimVsanReconfigSpec(
                dataEfficiencyConfig=vim.VsanDataEfficiencyConfig(
                    compressionEnabled=True, dedupEnabled=True),
                modify=True)
            vsanTask = vccs.VsanClusterReconfig(cluster=cluster,
                                                vsanReconfigSpec=vsanSpec)
            vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
            vsanapiutils.WaitForTasks([vcTask], si)
        else:
            print(
                "Compression/Dedupe is already enabled on vSphere Cluster: %s"
                % args.clusterName)
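
Example No. 1 resolves inventory objects through a get_obj helper that is not reproduced above. A minimal sketch of such a lookup, assuming the usual pyVmomi container-view approach (the helper in the original script may differ):

def get_obj(content, vimtype, name):
    # Hypothetical sketch of the lookup helper assumed by the example above.
    # Walk a container view of the inventory and return the first managed
    # object of the requested type whose name matches, or None if absent.
    obj = None
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    for managed_object in container.view:
        if managed_object.name == name:
            obj = managed_object
            break
    container.Destroy()
    return obj
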
Example No. 2
def main():
   args = GetArgs()
   if args.password:
      password = args.password
   else:
      password = getpass.getpass(prompt='Enter password for host %s and '
                                        'user %s: ' % (args.host,args.user))

   # For Python 2.7.9 and later, the default SSL context has stricter
   # connection handshaking rules. We may need to turn off hostname checking
   # and client-side certificate verification.
   context = None
   if sys.version_info[:3] > (2,7,8):
      context = ssl.create_default_context()
      context.check_hostname = False
      context.verify_mode = ssl.CERT_NONE

   # Disabling the annoying InsecureRequestWarning message
   requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

   si = SmartConnect(host=args.host,
                     user=args.user,
                     pwd=password,
                     port=int(args.port),
                     sslContext=context)

   atexit.register(Disconnect, si)

   #for detecting whether the host is VC or ESXi
   aboutInfo = si.content.about

   if aboutInfo.apiType == 'VirtualCenter':
      majorApiVersion = aboutInfo.apiVersion.split('.')[0]
      if int(majorApiVersion) < 6:
         print('vCenter Server version %s (lower than 6.0) is not supported.'
               % aboutInfo.apiVersion)
         return -1

      #Here is an example of how to access VC side VSAN Health Service API
      vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

      # Get VSAN Cluster Config System
      vccs = vcMos['vsan-cluster-config-system']

      cluster = getClusterInstance(args.clusterName, si)

      if cluster is None:
         print("Cluster %s is not found for %s" % (args.clusterName, args.host))
         return -1

      # Check whether Automatic Claiming is enabled; if so, disable it before continuing
      vsanCluster = vccs.VsanClusterGetConfig(cluster=cluster)

      if vsanCluster.defaultConfig.autoClaimStorage:
         print("Disabling Automatic Claiming on VSAN Cluster: %s" % args.clusterName)
         vsanSpec = vim.VimVsanReconfigSpec(
            vsanClusterConfig=vim.VsanClusterConfigInfo(
               defaultConfig=vim.VsanClusterConfigInfoHostDefaultInfo(
                  autoClaimStorage=False
               )
            ),
            modify=True
         )
         vsanTask = vccs.VsanClusterReconfig(cluster=cluster, vsanReconfigSpec=vsanSpec)
         vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
         vsanapiutils.WaitForTasks([vcTask], si)

      print ("Enabling Compression/Dedupe capability on VSAN Cluster: %s" % args.clusterName)
      # Create new VSAN Reconfig Spec, both Compression/Dedupe must be enabled together
      vsanSpec = vim.VimVsanReconfigSpec(
         dataEfficiencyConfig=vim.VsanDataEfficiencyConfig(
            compressionEnabled=True,
            dedupEnabled=True
         ),
         modify=True
      )
      vsanTask = vccs.VsanClusterReconfig(cluster=cluster, vsanReconfigSpec=vsanSpec)
      vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
      vsanapiutils.WaitForTasks([vcTask], si)
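
Example No. 2 (and Example No. 3 below) looks up the target cluster through a getClusterInstance helper that is not shown. A minimal sketch, assuming the common approach of searching each datacenter's host folder by name; the original helper may differ:

def getClusterInstance(clusterName, serviceInstance):
    # Hypothetical sketch of the cluster lookup assumed by the examples.
    # Search every datacenter's host folder for a child entity with the
    # requested name; return None if no such cluster exists.
    content = serviceInstance.RetrieveContent()
    searchIndex = content.searchIndex
    for datacenter in content.rootFolder.childEntity:
        cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName)
        if cluster is not None:
            return cluster
    return None
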
Example No. 3
def main():
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For Python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules. We may need to turn off hostname checking
    # and client-side certificate verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    cluster = getClusterInstance(args.clusterName, si)

    if args.vsanlicense:
        print('Assign VSAN license')
        lm = si.content.licenseManager
        lam = lm.licenseAssignmentManager
        lam.UpdateAssignedLicense(entity=cluster._moId,
                                  licenseKey=args.vsanlicense)

    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

    vsanClusterSystem = vcMos['vsan-cluster-config-system']
    vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system']

    isallFlash = args.allflash

    print('Enable VSAN with {} mode'.format(
        'all flash' if isallFlash else 'hybrid'))

    hostProps = CollectMultiple(
        si.content, cluster.host,
        ['name', 'configManager.vsanSystem', 'configManager.storageSystem'])
    hosts = hostProps.keys()

    for host in hosts:
        disks = [
            result.disk for result in hostProps[host]
            ['configManager.vsanSystem'].QueryDisksForVsan()
            if result.state == 'ineligible'
        ]
        print('Found ineligible disks {} in host {}'.format(
            [disk.displayName for disk in disks], hostProps[host]['name']))
        for disk in disks:
            if yes('Do you want to wipe disk {}?\nPlease always check the partition table and the data stored'
                   ' on those disks before doing any wipe! (yes/no)?'.format(
                       disk.displayName)):
                hostProps[host][
                    'configManager.storageSystem'].UpdateDiskPartitions(
                        disk.deviceName, vim.HostDiskPartitionSpec())

    tasks = []

    configInfo = vim.VsanHostConfigInfo(
        networkInfo=vim.VsanHostConfigInfoNetworkInfo(port=[
            vim.VsanHostConfigInfoNetworkInfoPortConfig(
                device=args.vmknic,
                ipConfig=vim.VsanHostIpConfig(upstreamIpAddress='224.1.2.3',
                                              downstreamIpAddress='224.2.3.4'))
        ]))

    for host in hosts:
        print('Enable VSAN traffic on host {} with {}'.format(
            hostProps[host]['name'], args.vmknic))
        task = hostProps[host]['configManager.vsanSystem'].UpdateVsan_Task(
            configInfo)
        tasks.append(task)
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print('Enable VSAN by claiming disks manually')
    # Build vsanReconfigSpec step by step; it only takes effect after VsanClusterReconfig is called
    vsanReconfigSpec = vim.VimVsanReconfigSpec(
        modify=True,
        vsanClusterConfig=vim.VsanClusterConfigInfo(
            enabled=True,
            defaultConfig=vim.VsanClusterConfigInfoHostDefaultInfo(
                autoClaimStorage=False)))

    if isallFlash:
        print('Enable deduplication and compression for VSAN')
        vsanReconfigSpec.dataEfficiencyConfig = vim.VsanDataEfficiencyConfig(
            compressionEnabled=True, dedupEnabled=True)

    if args.faultdomains:
        print('Add fault domains in vSAN')
        faultDomains = []
        # args.faultdomains is a string like "f1:host1,host2 f2:host3,host4"
        for faultdomain in args.faultdomains.split():
            fname, hostnames = faultdomain.split(':')
            domainSpec = vim.cluster.VsanFaultDomainSpec(
                name=fname,
                hosts=[
                    host for host in hosts
                    if hostProps[host]['name'] in hostnames.split(',')
                ])
            faultDomains.append(domainSpec)

        vsanReconfigSpec.faultDomainsSpec = vim.VimClusterVsanFaultDomainsConfigSpec(
            faultDomains=faultDomains)

    task = vsanClusterSystem.VsanClusterReconfig(cluster, vsanReconfigSpec)
    vsanapiutils.WaitForTasks([task], si)

    diskmap = {host: {'cache': [], 'capacity': []} for host in hosts}
    cacheDisks = []
    capacityDisks = []

    if isallFlash:
        # Get eligible SSDs from each host
        for host in hosts:
            ssds = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible' and result.disk.ssd
            ]
            smallerSize = min([
                disk.capacity.block * disk.capacity.blockSize for disk in ssds
            ])
            for ssd in ssds:
                size = ssd.capacity.block * ssd.capacity.blockSize
                if size == smallerSize:
                    diskmap[host]['cache'].append(ssd)
                    cacheDisks.append((ssd.displayName, sizeof_fmt(size),
                                       hostProps[host]['name']))
                else:
                    diskmap[host]['capacity'].append(ssd)
                    capacityDisks.append((ssd.displayName, sizeof_fmt(size),
                                          hostProps[host]['name']))
    else:
        for host in hosts:
            disks = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible'
            ]
            ssds = [disk for disk in disks if disk.ssd]
            hdds = [disk for disk in disks if not disk.ssd]

            for disk in ssds:
                diskmap[host]['cache'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                cacheDisks.append((disk.displayName, sizeof_fmt(size),
                                   hostProps[host]['name']))
            for disk in hdds:
                diskmap[host]['capacity'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                capacityDisks.append((disk.displayName, sizeof_fmt(size),
                                      hostProps[host]['name']))

    print('Claim these disks as cache disks')
    for disk in cacheDisks:
        print('Name: {}, Size: {}, Host: {}'.format(disk[0], disk[1], disk[2]))

    print('Claim these disks as capacity disks')
    for disk in capacityDisks:
        print('Name: {}, Size: {}, Host: {}'.format(disk[0], disk[1], disk[2]))

    for host, disks in diskmap.items():
        if disks['cache'] and disks['capacity']:
            dm = vim.VimVsanHostDiskMappingCreationSpec(
                cacheDisks=disks['cache'],
                capacityDisks=disks['capacity'],
                creationType='allFlash' if isallFlash else 'hybrid',
                host=host)

            task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm)
            tasks.append(task)

    print('Waiting for disk group creation tasks to finish')
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print('Display disk groups in each host')
    for host in hosts:
        diskMaps = vsanVcDiskManagementSystem.QueryDiskMappings(host)

        for index, diskMap in enumerate(diskMaps, 1):
            print('Host:{}, DiskGroup:{}, Cache Disks:{}, Capacity Disks:{}'.format(
                hostProps[host]['name'], index,
                diskMap.mapping.ssd.displayName,
                [disk.displayName for disk in diskMap.mapping.nonSsd]))

    print('Enable perf service on this cluster')
    vsanPerfSystem = vcMos['vsan-performance-manager']
    task = vsanPerfSystem.CreateStatsObjectTask(cluster)
    vsanapiutils.WaitForTasks([task], si)
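
Example No. 3 also depends on two small helpers, sizeof_fmt and yes, that are not shown. Minimal sketches, assuming they only format a byte count and ask for interactive confirmation (the originals may differ):

def sizeof_fmt(num, suffix='B'):
    # Hypothetical sketch: render a byte count with a binary unit prefix,
    # e.g. 1048576 -> '1.0MB'.
    for unit in ['', 'K', 'M', 'G', 'T', 'P']:
        if abs(num) < 1024.0:
            return '%3.1f%s%s' % (num, unit, suffix)
        num /= 1024.0
    return '%.1f%s%s' % (num, 'E', suffix)


def yes(message):
    # Hypothetical sketch: prompt the user and treat an answer starting
    # with 'y' as confirmation.
    return input(message).strip().lower().startswith('y')
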
Example No. 4
def main():
    # TEMP DEBUG
    print("Inputs for program are: ")
    for i in inputs:
        print("    ", i, inputs[i])
        if inputs[i] is None:
            print("Missing a required value for ", i)

    try:
        si = None
        try:
            print("Trying to connect to VCENTER SERVER . . .")
            si = connect.SmartConnectNoSSL('https', inputs['vcenter_ip'], 443, inputs['vcenter_user'], inputs['vcenter_password'])
        except IOError:
            pass
        atexit.register(Disconnect, si)

        print("Connected to VCENTER SERVER !")
        content = si.RetrieveContent()
        cluster = get_obj(content, [vim.ClusterComputeResource], inputs['cluster'])
        print("Cluster Name is ",cluster.name)

        # Configure vmkernel adapter on all hosts for vSAN
        vmkernel_nic = "vmk0"
        enable_vsan_vmknic(si, vmkernel_nic, cluster)

        ## From https://github.com/storage-code/vsanDeploy/blob/master/vsanDeploy.py
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

        vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

        vsanClusterSystem = vcMos['vsan-cluster-config-system']
        vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system']

        # Build vsanReconfigSpec step by step; it only takes effect after VsanClusterReconfig is called
        vsanReconfigSpec = vim.VimVsanReconfigSpec(
            modify=True,
            vsanClusterConfig=vim.VsanClusterConfigInfo(
                enabled=True,
                defaultConfig=vim.VsanClusterConfigInfoHostDefaultInfo(
                    autoClaimStorage=True
                )
            )
        )

        print('Disable deduplication and compression for VSAN')
        vsanReconfigSpec.dataEfficiencyConfig = vim.VsanDataEfficiencyConfig(
            compressionEnabled=False,
            dedupEnabled=False)

        task = vsanClusterSystem.VsanClusterReconfig(cluster, vsanReconfigSpec)
        wait_for_task(task)

        '''
        print("Configuring HCI for cluster %s ..." % cluster.name)
        hciCfgs = []
        for mo in cluster.host:
            hciCfg = vim.ClusterComputeResource.HostConfigurationInput()
            hciCfg.host = mo
            hciCfgs.append(hciCfg)

        lockdownMode = vim.host.HostAccessManager.LockdownMode.lockdownDisabled
        NTP_SERVER = "time-c-b.nist.gov"
        hostConfigProfile = CreateHostConfigProfile(NTP_SERVER, lockdownMode)
        vSanCfgInfo = vim.vsan.cluster.ConfigInfo(
            enabled=True,
            defaultConfig=vim.vsan.cluster.ConfigInfo.HostDefaultInfo(
                autoClaimStorage=False))
        print("vSanCfgInfo Set successfully ", vSanCfgInfo)
        vSanSpec = CreateDefaultVSanSpec(vSanCfgInfo)
        print("CreateDefaultVSanSpec successfully ", vSanSpec)

        #vcProf = GetVcProf()
        #dvsProfiles = GetDvsProfiles(cluster.host)
        clusterHciSpec = vim.ClusterComputeResource.HCIConfigSpec(
            hostConfigProfile=hostConfigProfile,
            vSanConfigSpec=vSanSpec)

        task = cluster.ConfigureHCI_Task(clusterSpec=clusterHciSpec, \
                                         hostInputs=hciCfgs)
        wait_for_task(task)
        print("Successfully configured HCI cluster %s" % clusterName)
        '''

    except vmodl.MethodFault as e:
        print("Caught vmodl fault: %s" % e.msg)
        return 1
    except Exception as e:
        print("Caught exception: %s" % str(e))
        return 1
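
Example No. 4 calls wait_for_task and enable_vsan_vmknic, which are defined elsewhere in that script. For reference, a task wait helper can be as simple as the following sketch, which polls the task state; pyVmomi also ships pyVim.task.WaitForTask for the same purpose:

import time

from pyVmomi import vim


def wait_for_task(task):
    # Hypothetical sketch: poll a vCenter/ESXi task until it leaves the
    # queued/running states, raising the task's fault if it ended in error.
    while task.info.state in (vim.TaskInfo.State.queued,
                              vim.TaskInfo.State.running):
        time.sleep(1)
    if task.info.state == vim.TaskInfo.State.error:
        raise task.info.error
    return task.info.result
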
Example No. 5
def main():

    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For Python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules. We may need to turn off hostname checking
    # and client-side certificate verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    # for detecting whether the host is VC or ESXi
    aboutInfo = si.content.about

    if args.operation == "listdisk" or args.operation == "prepare":
        if aboutInfo.apiType == 'HostAgent':
            majorApiVersion = aboutInfo.apiVersion.split('.')[0]
            if int(majorApiVersion) < 6:
                print("This ESXi host has version %s (lower than 6.0) "
                      "is not supported." % aboutInfo.apiVersion)
                return -1

            vcMos = vsanapiutils.GetVsanEsxMos(si._stub, context=context)
            vvds = vcMos['vsan-vcsa-deployer-system']

            esx = getESXInstance(si)

            if args.operation == "listdisk":
                disks = esx.configManager.vsanSystem.QueryDisksForVsan()
                for disk in disks:
                    if disk.state == "eligible":
                        print("Vendor: %s" % disk.disk.vendor.strip())
                        print("Model : %s" % disk.disk.model.strip())
                        print("Name  : %s\n" % disk.disk.canonicalName.strip())
            if args.operation == "prepare":
                cacheDisk = None
                capacityDisk = None
                disks = esx.configManager.vsanSystem.QueryDisksForVsan()
                for disk in disks:
                    if disk.disk.model.strip() == args.capacity:
                        capacityDisk = disk.disk
                    if disk.disk.model.strip() == args.cache:
                        cacheDisk = disk.disk

                if cacheDisk is not None and capacityDisk is not None:
                    spec = vim.VsanPrepareVsanForVcsaSpec(
                        vsanDataEfficiencyConfig=vim.VsanDataEfficiencyConfig(
                            compressionEnabled=True, dedupEnabled=True),
                        vsanDiskMappingCreationSpec=vim.
                        VimVsanHostDiskMappingCreationSpec(
                            cacheDisks=[cacheDisk],
                            capacityDisks=[capacityDisk],
                            creationType="allFlash",
                            host=esx))
                    print("\nPreparing ESXi host for VCSA Bootstrap ...")
                    taskId = vvds.VsanPrepareVsanForVcsa(spec=spec)
                    progress = vvds.VsanVcsaGetBootstrapProgress(
                        taskId=[taskId])
                    trackBootstrapProgress(vvds, progress, taskId)
                else:
                    print("Unable to locate either Cache "
                          "or Capacity disk for vSAN configuration")
                    return -1
        else:
            print("listdisk and prepare operation requires "
                  "that you connect an ESXi host not a vCenter Server")
            return -1
    elif args.operation == "postconfig":
        if aboutInfo.apiType == 'VirtualCenter':
            majorApiVersion = aboutInfo.apiVersion.split('.')[0]
            if int(majorApiVersion) < 6:
                print("This vCenter Server has version %s (lower than 6.0) "
                      "is not supported." % aboutInfo.apiVersion)
                return -1

            if args.clusterName is not None and \
                args.datacenterName is not None and \
                    args.esxIP is not None and \
                    args.esxPassword is not None:
                vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)
                vvds = vcMos['vsan-vcsa-deployer-system']

                spec = vim.VsanVcPostDeployConfigSpec(
                    clusterName=args.clusterName,
                    dcName=args.datacenterName,
                    firstHost=vim.HostConnectSpec(force=True,
                                                  hostName=args.esxIP,
                                                  userName="******",
                                                  password=args.esxPassword),
                    vsanDataEfficiencyConfig=vim.VsanDataEfficiencyConfig(
                        compressionEnabled=True, dedupEnabled=True))
                print("\nPost-Configuring vCenter Server "
                      "and adding ESXi host ...")
                taskId = vvds.VsanPostConfigForVcsa(spec=spec)
                progress = vvds.VsanVcsaGetBootstrapProgress(taskId=[taskId])
                trackBootstrapProgress(vvds, progress, taskId)
            else:
                print("Incorrect input, postconfig requires --datacenterName, "
                      "--clusterName, --esxIP and --esxPassword")
                return -1
        else:
            print("postconfig operation requires that you connect "
                  "to the deployed vCenter Server, not ESXi host")
            return -1
    else:
        print("Invalid command-line options")
        return -1
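
Example No. 5 resolves the connected ESXi host through a getESXInstance helper that is not shown. A minimal sketch, assuming a direct ESXi connection exposes exactly one HostSystem in its inventory (the original helper may differ):

def getESXInstance(serviceInstance):
    # Hypothetical sketch: on a standalone ESXi connection the inventory
    # contains a single HostSystem; return it, or None if nothing is found.
    content = serviceInstance.RetrieveContent()
    container = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.HostSystem], True)
    hosts = list(container.view)
    container.Destroy()
    return hosts[0] if hosts else None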