def enable_vsan_vmknic(si, vmkernel_nic, cluster):
    """Enable vSAN traffic on the given VMkernel adapter for every host in *cluster*.

    si: ServiceInstance connected to vCenter.
    vmkernel_nic: device name of the VMkernel adapter (e.g. 'vmk0').
    cluster: ClusterComputeResource whose member hosts are reconfigured.
    """
    tasks = []
    # Configuration spec marking the chosen VMkernel port for vSAN traffic.
    configInfo = vim.vsan.host.ConfigInfo(
        networkInfo=vim.vsan.host.ConfigInfo.NetworkInfo(port=[
            vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig(
                device=vmkernel_nic)
        ]))

    # Collect per-host properties once, then apply the same spec to each host.
    hostProps = CollectMultiple(
        si.content, cluster.host,
        ['name', 'configManager.vsanSystem', 'configManager.storageSystem'])
    hosts = hostProps.keys()

    for host in hosts:
        print('Enable vSAN traffic on host {} with {}'.format(
            hostProps[host]['name'], vmkernel_nic))
        # UpdateVsan_Task pushes the network config via the host's vsanSystem.
        task = hostProps[host]['configManager.vsanSystem'].UpdateVsan_Task(
            configInfo)
        tasks.append(task)

    # Wait for all per-host reconfiguration tasks to finish.
    vsanapiutils.WaitForTasks(tasks, si)

    # Build vsanReconfigSpec step by step. It takes effect only after calling
    # the VsanClusterReconfig method.
    clusterConfig = vim.VsanClusterConfigInfo(enabled=True)
    # NOTE(review): vsanReconfigSpec is built but VsanClusterReconfig is never
    # invoked in this function — the code appears truncated; confirm the
    # missing tail (or the caller) actually applies this spec.
    vsanReconfigSpec = vim.VimVsanReconfigSpec(modify=True,
                                               vsanClusterConfig=clusterConfig)
# Ejemplo n.º 2
# 0
def main():
    """Enable vSAN (without automatic disk claiming) on a named cluster."""
    si = vc_connect('your_vcenter')
    atexit.register(connect.Disconnect, si)

    cluster = getClusterInstance("your_cluster", si)
    if not cluster:
        print('cluster not found, exiting')
        exit()

    if cluster.configurationEx.vsanConfigInfo.enabled:
        print('vsan already configured on cluster, exiting')
        return

    # vSAN is not enabled yet: reconfigure the cluster through the vSAN API,
    # relaxing certificate checks so self-signed vCenter certs work.
    ssl_ctx = ssl.create_default_context()
    ssl_ctx.check_hostname = False
    ssl_ctx.verify_mode = ssl.CERT_NONE

    mos = vsanapiutils.GetVsanVcMos(si._stub, context=ssl_ctx)
    configSystem = mos['vsan-cluster-config-system']

    spec = vim.vsan.ReconfigSpec(modify=True, )
    spec.vsanClusterConfig = vim.vsan.cluster.ConfigInfo(enabled=True)
    # Leave disk claiming to the operator rather than auto-claiming storage.
    spec.vsanClusterConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo(
        autoClaimStorage=False)

    task = configSystem.VsanClusterReconfig(cluster, spec)
    vsanapiutils.WaitForTasks(task, si)
# Ejemplo n.º 3
# 0
def main():
    """Claim eligible SSDs (> ~300 GB) on one ESXi host into an all-flash
    vSAN disk group: the smallest SSD(s) become the cache tier, all larger
    ones the capacity tier.
    """
    server = 'vcenter'
    si = vc_connect(server)
    atexit.register(connect.Disconnect, si)
    dc = si.content.rootFolder.childEntity[0]  # first datacenter
    content = si.RetrieveContent()
    esxihost = get_obj(content, [vim.HostSystem], 'esxi_host')
    if esxihost is None:
        print(f'Failed to find {esxihost}  in datacenter {dc.name}')
        return

    # https://www.tachytelic.net/2014/03/posix-size-converter/
    # formula for block to GB conversion = Number Of 512 byte blocks/2/1024/1024
    diskmap = {esxihost: {'cache': [], 'capacity': []}}
    cacheDisks = []
    capacityDisks = []
    result = esxihost.configManager.vsanSystem.QueryDisksForVsan()

    # Keep only eligible disks larger than ~300 GB.
    ssds = [
        r.disk for r in result
        if r.state == 'eligible' and (r.disk.capacity.block) / 2 / 1024 / 1024 > 300
    ]
    # https://bit.ly/37lvGc3  vSAN SDKs Programming Guide
    if not ssds:
        print('no disks to add')
        exit()

    # Tier assignment: disks matching the smallest size go to cache,
    # everything else to capacity.
    smallerSize = min(
        disk.capacity.block * disk.capacity.blockSize for disk in ssds)
    for ssd in ssds:
        size = ssd.capacity.block * ssd.capacity.blockSize
        if size == smallerSize:
            diskmap[esxihost]['cache'].append(ssd)
            cacheDisks.append((ssd.displayName, size, esxihost.name))
        else:
            diskmap[esxihost]['capacity'].append(ssd)
            capacityDisks.append((ssd.displayName, size, esxihost.name))

    tasks = []
    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    # next two lines from https://github.com/storage-code/vsanDeploy/blob/master/vsanDeploy.py
    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)
    vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system']

    for host, disks in diskmap.items():
        # BUGFIX: the original built `dm` inside this loop but submitted it
        # once *after* the loop, raising NameError whenever no host had both
        # tiers populated. Submit each mapping as soon as it is built.
        if disks['cache'] and disks['capacity']:
            dm = vim.VimVsanHostDiskMappingCreationSpec(
                cacheDisks=disks['cache'],
                capacityDisks=disks['capacity'],
                creationType='allFlash',
                host=host)
            tasks.append(vsanVcDiskManagementSystem.InitializeDiskMappings(dm))

    if tasks:
        vsanapiutils.WaitForTasks(tasks, si)
def SetUpMetricToken(si, clusterConfigSystem, cluster):
    """Generate a metrics token and apply it to the cluster's vSAN config.

    Returns the token on success; raises Exception if the reconfigure
    task does not finish in the 'success' state.
    """
    token = GenerateRandomToken()
    spec = SetupClusterMetricSpec(token)
    # ReconfigureEx method need privilege Host.Inventory.EditCluster on the cluster
    task = vsanapiutils.ConvertVsanTaskToVcTask(
        clusterConfigSystem.ReconfigureEx(cluster, spec), si._stub)
    vsanapiutils.WaitForTasks([task], si)
    if task.info.state != 'success':
        raise Exception("Failed to reconfig vSAN metrics config: %s. Args: %s" % (task.info, spec))
    return token
# Ejemplo n.º 5
# 0
def main():
    """Create (if needed) a datacenter and a DRS+vSAN cluster on vCenter,
    then enable deduplication and compression on that cluster unless both
    are already on.

    Returns -1 on unsupported vCenter version or missing cluster.
    """
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules; turn off hostname checking and client
    # side cert verification so self-signed certificates work.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Disabling the annoying InsecureRequestWarning message
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    # apiType distinguishes vCenter ('VirtualCenter') from ESXi.
    aboutInfo = si.content.about

    if aboutInfo.apiType == 'VirtualCenter':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print(
                'The Virtual Center with version %s (lower than 6.0) is not supported.'
                % aboutInfo.apiVersion)
            return -1

        # Create vSphere Datacenter if it does not already exist.
        folder = si.content.rootFolder

        dc_moref = get_obj(si.content, [vim.Datacenter], args.datacenterName)
        if not dc_moref:
            print("Creating vSphere Datacenter: %s" % args.datacenterName)
            dc_moref = folder.CreateDatacenter(name=args.datacenterName)

        # Create vSphere Cluster with DRS and vSAN enabled from the start.
        host_folder = dc_moref.hostFolder
        cluster_spec = vim.cluster.ConfigSpecEx()
        drs_config = vim.cluster.DrsConfigInfo()
        drs_config.enabled = True
        cluster_spec.drsConfig = drs_config
        vsan_config = vim.vsan.cluster.ConfigInfo()
        vsan_config.enabled = True
        cluster_spec.vsanConfig = vsan_config
        print("Creating vSphere Cluster: %s" % args.clusterName)
        cluster = host_folder.CreateClusterEx(name=args.clusterName,
                                              spec=cluster_spec)

        # Access the VC-side vSAN Health Service managed objects.
        vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

        # Get VSAN Cluster Config System
        vccs = vcMos['vsan-cluster-config-system']

        #cluster = getClusterInstance(args.clusterName, si)

        if cluster is None:
            print("Cluster %s is not found for %s" %
                  (args.clusterName, args.host))
            return -1

        vsanCluster = vccs.VsanClusterGetConfig(cluster=cluster)

        # Check to see if Dedupe & Compression is already enabled, if not, then we'll enable it
        if (vsanCluster.dataEfficiencyConfig.compressionEnabled == False
                or vsanCluster.dataEfficiencyConfig.dedupEnabled == False):
            print(
                "Enabling Compression/Dedupe capability on vSphere Cluster: %s"
                % args.clusterName)
            # Create new VSAN Reconfig Spec, both Compression/Dedupe must be enabled together
            vsanSpec = vim.VimVsanReconfigSpec(
                dataEfficiencyConfig=vim.VsanDataEfficiencyConfig(
                    compressionEnabled=True, dedupEnabled=True),
                modify=True)
            vsanTask = vccs.VsanClusterReconfig(cluster=cluster,
                                                vsanReconfigSpec=vsanSpec)
            # vSAN tasks must be converted to VC tasks to be awaitable here.
            vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
            vsanapiutils.WaitForTasks([vcTask], si)
        else:
            print(
                "Compression/Dedupe is already enabled on vSphere Cluster: %s"
                % args.clusterName)
# Ejemplo n.º 6
# 0
def main():
    """Query, start or stop a vSAN proactive rebalance on a cluster,
    depending on args.operation ('get' | 'start' | 'stop').

    Returns -1 on unsupported vCenter version or missing cluster.
    """
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules; turn off hostname checking and client
    # side cert verification so self-signed certificates work.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    # apiType distinguishes vCenter ('VirtualCenter') from ESXi.
    aboutInfo = si.content.about

    if aboutInfo.apiType == 'VirtualCenter':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print('The Virtual Center with version %s (lower than 6.0) \
                   is not supported.' % aboutInfo.apiVersion)
            return -1

        # Access the VC-side vSAN Health Service managed objects.
        vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

        vchs = vcMos['vsan-cluster-health-system']

        cluster = getClusterInstance(args.clusterName, si)

        if cluster is None:
            print("Cluster %s is not found for %s" %
                  (args.clusterName, args.host))
            return -1

        if args.operation == "get":
            # Report whether a rebalance is currently running.
            results = isRebalancing(vchs, cluster)
            print("%s rebalancing: %s \n" % (args.clusterName, results))
        elif args.operation == "start":
            # Start only if no rebalance is already in progress.
            if not isRebalancing(vchs, cluster):
                print("Starting rebalancing operation on %s cluster ..." %
                      args.clusterName)
                vsanTask = vchs.VsanRebalanceCluster(cluster=cluster)
                # Convert to a VC task so it can be awaited via this session.
                vcTask = vsanapiutils.ConvertVsanTaskToVcTask(
                    vsanTask, si._stub)
                vsanapiutils.WaitForTasks([vcTask], si)
            else:
                print("Rebalancing operation is already currently in progress")
        elif args.operation == "stop":
            # Stop only if a rebalance is actually running.
            if isRebalancing(vchs, cluster):
                print("Stopping rebalancing operation on %s cluster ..." %
                      args.clusterName)
                vsanTask = vchs.VsanStopRebalanceCluster(cluster=cluster)
                vcTask = vsanapiutils.ConvertVsanTaskToVcTask(
                    vsanTask, si._stub)
                vsanapiutils.WaitForTasks([vcTask], si)
            else:
                print("The rebalancing operation is currently not running")
# Ejemplo n.º 7
# 0
def main():
    """Against vCenter: print the vSAN cluster/host health summary and run
    an immediate object-repair task. Against ESXi: print vSAN performance
    manager node information.

    Returns -1 on unsupported versions or missing cluster.
    """
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules; turn off hostname checking and client
    # side cert verification so self-signed certificates work.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    # apiType distinguishes vCenter ('VirtualCenter') from ESXi ('HostAgent').
    aboutInfo = si.content.about

    if aboutInfo.apiType == 'VirtualCenter':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print(
                'The Virtual Center with version %s (lower than 6.0) is not supported.'
                % aboutInfo.apiVersion)
            return -1

        # Access the VC-side vSAN Health Service managed objects.
        vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)
        # Get vsan health system
        vhs = vcMos['vsan-cluster-health-system']

        cluster = getClusterInstance(args.clusterName, si)

        if cluster is None:
            print("Cluster %s is not found for %s" %
                  (args.clusterName, args.host))
            return -1
        # vSAN cluster health summary can be cached at VC.
        fetchFromCache = True
        # NOTE(review): raw_input is Python 2 only, yet this block otherwise
        # uses print() calls — confirm the intended interpreter version.
        fetchFromCacheAnswer = raw_input(
            'Do you want to fetch the cluster health from cache if exists?(y/n):'
        )
        if fetchFromCacheAnswer.lower() == 'n':
            fetchFromCache = False
        print('Fetching cluster health from cached state: %s' %
              ('Yes' if fetchFromCache else 'No'))
        healthSummary = vhs.QueryClusterHealthSummary(
            cluster=cluster,
            includeObjUuids=True,
            fetchFromCache=fetchFromCache)
        clusterStatus = healthSummary.clusterStatus

        print("Cluster %s Status: %s" %
              (args.clusterName, clusterStatus.status))
        for hostStatus in clusterStatus.trackedHostsStatus:
            print("Host %s Status: %s" %
                  (hostStatus.hostname, hostStatus.status))

        # Example of how to track a task returned by the vSAN API.
        vsanTask = vhs.RepairClusterObjectsImmediate(cluster)
        # Convert to a VC task to bind the managed object to the VC session.
        vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
        vsanapiutils.WaitForTasks([vcTask], si)
        print('Repairing cluster objects task completed with state: %s' %
              vcTask.info.state)

    if aboutInfo.apiType == 'HostAgent':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print(
                'The ESXi with version %s (lower than 6.0) is not supported.' %
                aboutInfo.apiVersion)
            return -1

        # Access the ESXi-side vSAN Performance Service managed objects.
        esxMos = vsanapiutils.GetVsanEsxMos(si._stub, context=context)
        # Get vsan health system
        vpm = esxMos['vsan-performance-manager']

        nodeInfo = vpm.VsanPerfQueryNodeInformation()[0]

        print('Hostname: %s' % args.host)
        print('  version: %s' % nodeInfo.version)
        print('  isCmmdsMaster: %s' % nodeInfo.isCmmdsMaster)
        print('  isStatsMaster: %s' % nodeInfo.isStatsMaster)
        print('  vsanMasterUuid: %s' % nodeInfo.vsanMasterUuid)
        print('  vsanNodeUuid: %s' % nodeInfo.vsanNodeUuid)
# Ejemplo n.º 8
# 0
def main():
    """Disable automatic disk claiming on a vSAN cluster (when it is on)
    and then enable the deduplication/compression capability.

    Returns -1 on unsupported vCenter version or missing cluster.
    """
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                          'user %s: ' % (args.host, args.user))

    # Python 2.7.9+ validates certificates by default; relax hostname and
    # certificate checks so self-signed certs work.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Disabling the annoying InsecureRequestWarning message
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    # Only vCenter exposes the cluster config system used below.
    aboutInfo = si.content.about
    if aboutInfo.apiType != 'VirtualCenter':
        return

    if int(aboutInfo.apiVersion.split('.')[0]) < 6:
        print('The Virtual Center with version %s (lower than 6.0) is not supported.'
              % aboutInfo.apiVersion)
        return -1

    # Access the VC-side vSAN Health Service managed objects.
    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

    # Get VSAN Cluster Config System
    vccs = vcMos['vsan-cluster-config-system']

    cluster = getClusterInstance(args.clusterName, si)
    if cluster is None:
        print("Cluster %s is not found for %s" % (args.clusterName, args.host))
        return -1

    # Automatic claiming must be turned off before proceeding.
    currentConfig = vccs.VsanClusterGetConfig(cluster=cluster)
    if currentConfig.defaultConfig.autoClaimStorage == True:
        print("Disabling Automatic Claiming on VSAN Cluster: %s" % args.clusterName)
        disableSpec = vim.VimVsanReconfigSpec(
            vsanClusterConfig=vim.VsanClusterConfigInfo(
                defaultConfig=vim.VsanClusterConfigInfoHostDefaultInfo(
                    autoClaimStorage=False)),
            modify=True)
        vcTask = vsanapiutils.ConvertVsanTaskToVcTask(
            vccs.VsanClusterReconfig(cluster=cluster,
                                     vsanReconfigSpec=disableSpec),
            si._stub)
        vsanapiutils.WaitForTasks([vcTask], si)

    print("Enabling Compression/Dedupe capability on VSAN Cluster: %s" % args.clusterName)
    # Create new VSAN Reconfig Spec, both Compression/Dedupe must be enabled together
    enableSpec = vim.VimVsanReconfigSpec(
        dataEfficiencyConfig=vim.VsanDataEfficiencyConfig(
            compressionEnabled=True,
            dedupEnabled=True),
        modify=True)
    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(
        vccs.VsanClusterReconfig(cluster=cluster, vsanReconfigSpec=enableSpec),
        si._stub)
    vsanapiutils.WaitForTasks([vcTask], si)
# Ejemplo n.º 9
# 0
def main():
    """Demonstrate the vSAN iSCSI target service lifecycle through vCenter:
    enable the service, create a target plus a 1 GB LUN, remove both, and
    finally disable the service again.

    Returns -1 on unsupported versions, non-vCenter endpoints, missing
    cluster, or a missing vSAN storage policy.
    """
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn of the hostname checking
    # and client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    # For detecting whether the host is vCenter or ESXi.
    aboutInfo = si.content.about
    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host)

    if aboutInfo.apiType == 'VirtualCenter':
        vcVersion = StrictVersion(aboutInfo.apiVersion)
        if vcVersion < StrictVersion('6.5'):
            # BUGFIX: the original passed two comma-separated fragments to
            # print() and applied '%' only to the second fragment (which has
            # no conversion specifier), raising TypeError at runtime.
            print('The Virtual Center with version %s (lower than 6.5) is not '
                  'supported.' % aboutInfo.apiVersion)
            return -1
    else:
        # BUGFIX: the original printed two comma-separated fragments, which
        # yields a stray separator; use implicit concatenation instead.
        print('The vSAN iSCSI service API are only accessible through '
              'vCenter')
        return -1

    cluster = getClusterInstance(args.clusterName, si)
    if cluster is None:
        print("Cluster %s is not found for %s" % (args.clusterName, args.host))
        return -1

    vcMos = vsanapiutils.GetVsanVcMos(si._stub,
                                      context=context,
                                      version=apiVersion)
    vits = vcMos['vsan-cluster-iscsi-target-system']
    vccs = vcMos['vsan-cluster-config-system']

    # Fetch the storage policy ID for enable vSAN iSCSI service and
    # create the iSCSI home object.
    pbmSi = connectToSpbm(si._stub, context)
    vsanStoragePolicy = getVsanStoragePolicy(pbmSi)
    if vsanStoragePolicy is None:
        print('Cannot find the vSAN Storage Policy from the Virtual ' +
              'Center server.')
        return -1

    # Enable iSCSI service through vSAN Cluster Reconfiguration API on VC, and
    # the config port defaults to 3260 and can be customized (2300 here).
    defaultVsanConfigSpec = vim.cluster.VsanIscsiTargetServiceDefaultConfigSpec(
        networkInterface="vmk0", port=2300)
    vitEnableSpec = vim.cluster.VsanIscsiTargetServiceSpec(
        homeObjectStoragePolicy=vsanStoragePolicy,
        defaultConfig=defaultVsanConfigSpec,
        enabled=True)

    clusterReconfigSpec = vim.vsan.ReconfigSpec(iscsiSpec=vitEnableSpec)
    vitEnableVsanTask = vccs.ReconfigureEx(cluster, clusterReconfigSpec)
    vitEnableVcTask = vsanapiutils.ConvertVsanTaskToVcTask(
        vitEnableVsanTask, si._stub)
    vsanapiutils.WaitForTasks([vitEnableVcTask], si)
    print('Enable vSAN iSCSI service task finished with status: %s' %
          vitEnableVcTask.info.state)

    # Create vSAN iSCSI targets and an associated LUN with the size of 1GB.
    targetAlias = "sampleTarget"
    targetSpec = vim.cluster.VsanIscsiTargetSpec(
        alias=targetAlias, iqn='iqn.2015-08.com.vmware:vit.target1')
    vsanTask = vits.AddIscsiTarget(cluster, targetSpec)
    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
    vsanapiutils.WaitForTasks([vcTask], si)
    print('Create vSAN iSCSI target task finished with status: %s' %
          vcTask.info.state)

    lunSize = 1 * 1024 * 1024 * 1024  # 1GB
    lunSpec = vim.cluster.VsanIscsiLUNSpec(lunId=0,
                                           lunSize=lunSize,
                                           storagePolicy=vsanStoragePolicy)
    vsanTask = vits.AddIscsiLUN(cluster, targetAlias, lunSpec)
    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
    vsanapiutils.WaitForTasks([vcTask], si)
    print('Create vSAN iSCSI LUN task finished with status: %s' %
          vcTask.info.state)

    # Remove vSAN iSCSI targets and LUN associated with the targets.
    vsanTask = vits.RemoveIscsiLUN(cluster, targetAlias, 0)
    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
    vsanapiutils.WaitForTasks([vcTask], si)
    print("Remove vSAN iSCSI LUN task finished with status:%s" %
          vcTask.info.state)

    vsanTask = vits.RemoveIscsiTarget(cluster, targetAlias)
    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
    vsanapiutils.WaitForTasks([vcTask], si)
    print("Remove vSAN iSCSI target task finished with status:%s" %
          vcTask.info.state)

    # Disable iSCSI service through vSAN iSCSI API on vCenter.
    vitDisableSpec = vim.cluster.VsanIscsiTargetServiceSpec(enabled=False)
    clusterReconfigSpec = vim.vsan.ReconfigSpec(iscsiSpec=vitDisableSpec)
    vitDisableVsanTask = vccs.ReconfigureEx(cluster, clusterReconfigSpec)
    vitDisableVcTask = vsanapiutils.ConvertVsanTaskToVcTask(
        vitDisableVsanTask, si._stub)
    vsanapiutils.WaitForTasks([vitDisableVcTask], si)
    print('Disable vSAN iSCSI service task finished with status: %s' %
          vitDisableVcTask.info.state)
# Ejemplo n.º 10
# 0
def main():
    """Full vSAN enablement workflow against vCenter: optionally assign a
    vSAN license, wipe ineligible disks (interactively), enable vSAN traffic
    on a vmknic for every host, reconfigure the cluster (optionally with
    dedupe/compression and fault domains), claim disks into disk groups,
    and enable the performance service.

    NOTE(review): this block uses Python 2 syntax (print statements,
    dict.iteritems) unlike the Python 3 blocks elsewhere in this file.
    """
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For python 2.7.9 and later, the default SSL context has stricter
    # connection handshaking rules; turn off hostname checking and client
    # side cert verification so self-signed certificates work.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    cluster = getClusterInstance(args.clusterName, si)

    # Optionally assign a vSAN license to the cluster first.
    if args.vsanlicense:
        print 'Assign VSAN license'
        lm = si.content.licenseManager
        lam = lm.licenseAssignmentManager
        lam.UpdateAssignedLicense(entity=cluster._moId,
                                  licenseKey=args.vsanlicense)

    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

    vsanClusterSystem = vcMos['vsan-cluster-config-system']
    vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system']

    isallFlash = args.allflash

    print 'Enable VSAN with {} mode'.format(
        'all flash ' if isallFlash else 'hybrid')

    # Collect per-host properties once for all hosts in the cluster.
    hostProps = CollectMultiple(
        si.content, cluster.host,
        ['name', 'configManager.vsanSystem', 'configManager.storageSystem'])
    hosts = hostProps.keys()

    # Offer to wipe ineligible disks so they can be claimed by vSAN.
    for host in hosts:
        disks = [
            result.disk for result in hostProps[host]
            ['configManager.vsanSystem'].QueryDisksForVsan()
            if result.state == 'ineligible'
        ]
        print 'Find ineligible disks {} in host {}'.format(
            [disk.displayName for disk in disks], hostProps[host]['name'])
        for disk in disks:
            if yes('Do you want to wipe disk {}?\nPlease Always check the partition table and the data stored'
                   ' on those disks before doing any wipe! (yes/no)?'.format(
                       disk.displayName)):
                # An empty HostDiskPartitionSpec clears the partition table.
                hostProps[host][
                    'configManager.storageSystem'].UpdateDiskPartitions(
                        disk.deviceName, vim.HostDiskPartitionSpec())

    tasks = []

    # Network config: enable vSAN traffic on the chosen vmknic with unicast
    # upstream/downstream multicast group addresses.
    configInfo = vim.VsanHostConfigInfo(
        networkInfo=vim.VsanHostConfigInfoNetworkInfo(port=[
            vim.VsanHostConfigInfoNetworkInfoPortConfig(
                device=args.vmknic,
                ipConfig=vim.VsanHostIpConfig(upstreamIpAddress='224.1.2.3',
                                              downstreamIpAddress='224.2.3.4'))
        ]))

    for host in hosts:
        print 'Enable VSAN trafic in host {} with {}'.format(
            hostProps[host]['name'], args.vmknic)
        task = hostProps[host]['configManager.vsanSystem'].UpdateVsan_Task(
            configInfo)
        tasks.append(task)
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print 'Enable VSAN by claiming disks manually'
    # Build vsanReconfigSpec step by step; it only takes effect after the
    # VsanClusterReconfig method is called.
    vsanReconfigSpec = vim.VimVsanReconfigSpec(
        modify=True,
        vsanClusterConfig=vim.VsanClusterConfigInfo(
            enabled=True,
            defaultConfig=vim.VsanClusterConfigInfoHostDefaultInfo(
                autoClaimStorage=False)))

    if isallFlash:
        # Dedupe and compression are only available on all-flash clusters.
        print 'Enable deduplication and compression for VSAN'
        vsanReconfigSpec.dataEfficiencyConfig = vim.VsanDataEfficiencyConfig(
            compressionEnabled=True, dedupEnabled=True)

    if args.faultdomains:
        print 'Add fault domains in vsan'
        faultDomains = []
        # args.faultdomains is a string like "f1:host1,host2 f2:host3,host4"
        for faultdomain in args.faultdomains.split():
            fname, hostnames = faultdomain.split(':')
            domainSpec = vim.cluster.VsanFaultDomainSpec(
                name=fname,
                hosts=[
                    host for host in hosts
                    if hostProps[host]['name'] in hostnames.split(',')
                ])
            faultDomains.append(domainSpec)

        vsanReconfigSpec.faultDomainsSpec = vim.VimClusterVsanFaultDomainsConfigSpec(
            faultDomains=faultDomains)

    task = vsanClusterSystem.VsanClusterReconfig(cluster, vsanReconfigSpec)
    vsanapiutils.WaitForTasks([task], si)

    # Partition each host's eligible disks into cache and capacity tiers.
    diskmap = {host: {'cache': [], 'capacity': []} for host in hosts}
    cacheDisks = []
    capacityDisks = []

    if isallFlash:
        # All-flash: the smallest SSDs become cache, larger ones capacity.
        for host in hosts:
            ssds = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible' and result.disk.ssd
            ]
            smallerSize = min([
                disk.capacity.block * disk.capacity.blockSize for disk in ssds
            ])
            for ssd in ssds:
                size = ssd.capacity.block * ssd.capacity.blockSize
                if size == smallerSize:
                    diskmap[host]['cache'].append(ssd)
                    cacheDisks.append((ssd.displayName, sizeof_fmt(size),
                                       hostProps[host]['name']))
                else:
                    diskmap[host]['capacity'].append(ssd)
                    capacityDisks.append((ssd.displayName, sizeof_fmt(size),
                                          hostProps[host]['name']))
    else:
        # Hybrid: SSDs serve as cache, spinning disks as capacity.
        for host in hosts:
            disks = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible'
            ]
            ssds = [disk for disk in disks if disk.ssd]
            hdds = [disk for disk in disks if not disk.ssd]

            for disk in ssds:
                diskmap[host]['cache'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                cacheDisks.append((disk.displayName, sizeof_fmt(size),
                                   hostProps[host]['name']))
            for disk in hdds:
                diskmap[host]['capacity'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                capacityDisks.append((disk.displayName, sizeof_fmt(size),
                                      hostProps[host]['name']))

    print 'Claim these disks to cache disks'
    for disk in cacheDisks:
        print 'Name:{}, Size:{}, Host:{}'.format(disk[0], disk[1], disk[2])

    print 'Claim these disks to capacity disks'
    for disk in capacityDisks:
        print 'Name:{}, Size:{}, Host:{}'.format(disk[0], disk[1], disk[2])

    # Create one disk group per host that has both tiers populated.
    for host, disks in diskmap.iteritems():
        if disks['cache'] and disks['capacity']:
            dm = vim.VimVsanHostDiskMappingCreationSpec(
                cacheDisks=disks['cache'],
                capacityDisks=disks['capacity'],
                creationType='allFlash' if isallFlash else 'hybrid',
                host=host)

            task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm)
            tasks.append(task)

    print 'Wait for create disk group tasks finish'
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print 'Display disk groups in each host'
    for host in hosts:
        diskMaps = vsanVcDiskManagementSystem.QueryDiskMappings(host)

        for index, diskMap in enumerate(diskMaps, 1):
            print 'Host:{}, DiskGroup:{}, Cache Disks:{}, Capacity Disks:{}'.format(
                hostProps[host]['name'], index,
                diskMap.mapping.ssd.displayName,
                [disk.displayName for disk in diskMap.mapping.nonSsd])

    print 'Enable perf service on this cluster'
    vsanPerfSystem = vcMos['vsan-performance-manager']
    task = vsanPerfSystem.CreateStatsObjectTask(cluster)
    vsanapiutils.WaitForTasks([task], si)
# Ejemplo n.º 11
# 0
def main():
    """Connect to the given host and, when it is a vCenter, print the vSAN
    cluster health summary and launch an immediate cluster-object repair.

    Returns -1 when the vCenter version is unsupported or the cluster is not
    found; otherwise returns None.
    """
    opts = GetArgs()
    # Fall back to an interactive prompt when no password was supplied.
    secret = opts.password or getpass.getpass(
        prompt='Enter password for host %s and '
        'user %s: ' % (opts.host, opts.user))

    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and client side cert verification.
    ssl_ctx = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_ctx = ssl.create_default_context()
        ssl_ctx.check_hostname = False
        ssl_ctx.verify_mode = ssl.CERT_NONE

    service_instance = SmartConnect(host=opts.host,
                                    user=opts.user,
                                    pwd=secret,
                                    port=int(opts.port),
                                    sslContext=ssl_ctx)

    # Make sure the session is torn down on interpreter exit.
    atexit.register(Disconnect, service_instance)

    # Detecting whether the host is vCenter or ESXi.
    about = service_instance.content.about
    vmodl_version = vsanapiutils.GetLatestVmodlVersion(opts.host)

    if about.apiType != 'VirtualCenter':
        # ESXi (HostAgent) connections are not handled by this sample.
        return

    if int(about.apiVersion.split('.')[0]) < 6:
        print(
            'The Virtual Center with version %s (lower than 6.0) is not supported.'
            % about.apiVersion)
        return -1

    # Get vSAN health system from the vCenter Managed Object references.
    vc_mos = vsanapiutils.GetVsanVcMos(service_instance._stub,
                                       context=ssl_ctx,
                                       version=vmodl_version)
    health_system = vc_mos['vsan-cluster-health-system']

    target_cluster = getClusterInstance(opts.clusterName, service_instance)
    if target_cluster is None:
        print("Cluster %s is not found for %s" %
              (opts.clusterName, opts.host))
        return -1

    # vSAN cluster health summary can be cached at vCenter.
    summary = health_system.QueryClusterHealthSummary(cluster=target_cluster,
                                                      includeObjUuids=True)
    status = summary.clusterStatus

    print("Cluster %s Status: %s" % (opts.clusterName, status.status))
    for host_status in status.trackedHostsStatus:
        print("Host %s Status: %s" %
              (host_status.hostname, host_status.status))

    # Track the task returned by the vSAN API: convert it to a vCenter task
    # and bind the managed object to this vCenter session before waiting.
    repair_task = health_system.RepairClusterObjectsImmediate(target_cluster)
    vc_task = vsanapiutils.ConvertVsanTaskToVcTask(repair_task,
                                                   service_instance._stub)
    vsanapiutils.WaitForTasks([vc_task], service_instance)
    print('Repairing cluster objects task completed with state: %s' %
          vc_task.info.state)
# Ejemplo n.º 12
# 0
# NOTE(review): this top-level fragment appears to be mis-pasted residue of the
# main() defined above — it reuses names (vhs, cluster, args, fetchFromCache,
# si) that only exist inside that function, so as written it would raise
# NameError at import time. Confirm intent before keeping it at module level.
#   print('Fetching cluster health from cached state: %s' %
#          ('Yes' if fetchFromCache else 'No'))
healthSummary = vhs.QueryClusterHealthSummary(cluster=cluster,
                                              includeObjUuids=True,
                                              fetchFromCache=fetchFromCache)
clusterStatus = healthSummary.clusterStatus

print("Cluster %s Status: %s" % (args.clusterName, clusterStatus.status))
for hostStatus in clusterStatus.trackedHostsStatus:
    print("Host %s Status: %s" % (hostStatus.hostname, hostStatus.status))

# Here is an example of how to track a task returned by the vSAN API.
vsanTask = vhs.RepairClusterObjectsImmediate(cluster)
# Convert to vCenter task and bind the MO with vCenter session.
vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
vsanapiutils.WaitForTasks([vcTask], si)
print('Repairing cluster objects task completed with state: %s' %
      vcTask.info.state)

#if aboutInfo.apiType == 'HostAgent':
#   majorApiVersion = aboutInfo.apiVersion.split('.')[0]
#   if int(majorApiVersion) < 6:
#      print('The ESXi with version %s (lower than 6.0) is not supported.'
#            % aboutInfo.apiVersion)
#      return -1

# Get vSAN health system from the ESXi Managed Object references.
#  esxMos = vsanapiutils.GetVsanEsxMos(
#        si._stub, context=context, version=apiVersion)
#  vpm = esxMos['vsan-performance-manager']
# Ejemplo n.º 13
# 0
def main():
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    # Detecting whether the host is vCenter or ESXi.
    aboutInfo = si.content.about
    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host)

    if aboutInfo.apiType == 'VirtualCenter':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print(
                'The Virtual Center with version %s (lower than 6.0) is not supported.'
                % aboutInfo.apiVersion)
            return -1

        # Get vSAN health system from the vCenter Managed Object references.
        vcMos = vsanapiutils.GetVsanVcMos(si._stub,
                                          context=context,
                                          version=apiVersion)
        vhs = vcMos['vsan-cluster-health-system']

        cluster = getClusterInstance(args.clusterName, si)

        if cluster is None:
            print("Cluster %s is not found for %s" %
                  (args.clusterName, args.host))
            return -1

        # vSAN cluster health summary can be cached at vCenter.
        fetchFromCache = False
        # fetchFromCacheAnswer = input(
        #    'Do you want to fetch the cluster health from cache if exists?(y/n):')
        # if fetchFromCacheAnswer.lower() == 'n':
        #    fetchFromCache = False
        # print('Fetching cluster health from cached state: %s' %
        #        ('Yes' if fetchFromCache else 'No'))

        healthSummary = vhs.QueryClusterHealthSummary(
            cluster=cluster,
            includeObjUuids=True,
            fetchFromCache=fetchFromCache)

        clusterStatus = healthSummary.clusterStatus
        groups = healthSummary.groups
        import pprint
        # pprint.pprint(groups)
        for group in groups:
            pprint.pprint(group.groupName)
            if group.groupName == 'Cluster':
                pprint.pprint(group.groupId)
                for test in group.groupTests:
                    pprint.pprint(test.testName + " " + test.testHealth)

        print("Cluster %s Status: %s" %
              (args.clusterName, clusterStatus.status))
        for hostStatus in clusterStatus.trackedHostsStatus:
            print("Host %s Status: %s" %
                  (hostStatus.hostname, hostStatus.status))

        # Here is an example of how to track a task returned by the vSAN API.
        vsanTask = vhs.RepairClusterObjectsImmediate(cluster)
        # Convert to vCenter task and bind the MO with vCenter session.
        vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
        vsanapiutils.WaitForTasks([vcTask], si)
        print('Repairing cluster objects task completed with state: %s' %
              vcTask.info.state)

    print(aboutInfo.apiType)
    if aboutInfo.apiType == 'HostAgent':
        majorApiVersion = aboutInfo.apiVersion.split('.')[0]
        if int(majorApiVersion) < 6:
            print(
                'The ESXi with version %s (lower than 6.0) is not supported.' %
                aboutInfo.apiVersion)
            return -1

        # Get vSAN health system from the ESXi Managed Object references.
        esxMos = vsanapiutils.GetVsanEsxMos(si._stub,
                                            context=context,
                                            version=apiVersion)
        vpm = esxMos['vsan-performance-manager']
        esxhealth = esxMos['vsan-cluster-health-system']

        import pprint
        pprint.pprint(dir(esxhealth))

        cluster = getClusterInstance(args.clusterName, si)
        esxsummary = esxhealth.VsanQueryVcClusterHealthSummary(cluster)
        pprint.pprint(esxsummary)
        pprint.pprint(dir(esxsummary))

        nodeInfo = vpm.VsanPerfQueryNodeInformation()[0]

        print(nodeInfo)
        print('Hostname: %s' % args.host)
        print('  version: %s' % nodeInfo.version)
        print('  isCmmdsMaster: %s' % nodeInfo.isCmmdsMaster)
        print('  isStatsMaster: %s' % nodeInfo.isStatsMaster)
        print('  vsanMasterUuid: %s' % nodeInfo.vsanMasterUuid)
        print('  vsanNodeUuid: %s' % nodeInfo.vsanNodeUuid)