Example No. 1
def add_capacity_to_diskgroup(service_instance, vsan_disk_mgmt_system,
                              host_ref, diskgroup, new_capacity_disks):
    """
    Adds capacity disk(s) to a disk group.

    service_instance
        Service instance to the host or vCenter

    vsan_disk_mgmt_system
        vim.VimClusterVsanVcDiskManagementSystem representing the vSAN disk
        management system retrieved from the vSAN endpoint.

    host_ref
        vim.HostSystem object representing the host that owns the disk group
        being extended

    diskgroup
        The vim.VsanHostDiskMapping object representing the host's disk group
        to which the additional capacity needs to be added

    new_capacity_disks
        List of vim.HostScsiDisk objects representing the disks to be added as
        capacity disks. These can be either SSDs or non-SSDs, but the list
        must contain at least one disk.
    """
    hostname = salt.utils.vmware.get_managed_object_name(host_ref)
    cache_disk = diskgroup.ssd
    cache_disk_id = cache_disk.canonicalName
    log.debug(
        "Adding capacity to disk group with cache disk '%s' on host '%s'",
        cache_disk_id,
        hostname,
    )
    log.trace("new_capacity_disk_ids = %s",
              [c.canonicalName for c in new_capacity_disks])
    spec = vim.VimVsanHostDiskMappingCreationSpec()
    spec.cacheDisks = [cache_disk]
    spec.capacityDisks = new_capacity_disks
    # All new capacity disks must be either ssd or non-ssd (mixed disks are not
    # supported); also they need to match the type of the existing capacity
    # disks; we assume disks are already validated
    spec.creationType = ("allFlash" if getattr(new_capacity_disks[0], "ssd")
                         else "hybrid")
    spec.host = host_ref
    try:
        task = vsan_disk_mgmt_system.InitializeDiskMappings(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError("Not enough permissions. Required privilege: "
                             "{0}".format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.fault.MethodNotFound as exc:
        log.exception(exc)
        raise VMwareRuntimeError("Method '{0}' not found".format(exc.method))
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    _wait_for_tasks([task], service_instance)
    return True
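
Both this snippet and Example No. 2 below lean on module-level names that are not shown (log, VMwareApiError, VMwareRuntimeError, _wait_for_tasks). A minimal sketch of that surrounding context follows; it is modeled on salt.utils.vsan, and the exact imports and the _wait_for_tasks delegation are assumptions:

# Assumed module context for Examples No. 1 and 2 (modeled on
# salt.utils.vsan; adjust to your codebase).
import logging

import salt.utils.vmware
from salt.exceptions import VMwareApiError, VMwareRuntimeError

from pyVmomi import vim, vmodl

log = logging.getLogger(__name__)


def _wait_for_tasks(tasks, service_instance):
    # Block until each vSAN task completes (assumption: delegates to
    # salt.utils.vmware.wait_for_task, as Salt's vSAN utils do).
    for task in tasks:
        salt.utils.vmware.wait_for_task(task, "vSAN", task.__class__.__name__)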
Example No. 2
def create_diskgroup(service_instance, vsan_disk_mgmt_system, host_ref,
                     cache_disk, capacity_disks):
    """
    Creates a disk group

    service_instance
        Service instance to the host or vCenter

    vsan_disk_mgmt_system
        vim.VimClusterVsanVcDiskManagementSystem representing the vSAN disk
        management system retrieved from the vSAN endpoint.

    host_ref
        vim.HostSystem object representing the target host the disk group will
        be created on

    cache_disk
        The vim.HostScsiDisk to be used as a cache disk. It must be an SSD.

    capacity_disks
        List of vim.HostScsiDisk objects representing the disks to be used as
        capacity disks. These can be either SSDs or non-SSDs, but the list
        must contain at least one disk.
    """
    hostname = salt.utils.vmware.get_managed_object_name(host_ref)
    cache_disk_id = cache_disk.canonicalName
    log.debug(
        "Creating a new disk group with cache disk '%s' on host '%s'",
        cache_disk_id,
        hostname,
    )
    log.trace("capacity_disk_ids = %s",
              [c.canonicalName for c in capacity_disks])
    spec = vim.VimVsanHostDiskMappingCreationSpec()
    spec.cacheDisks = [cache_disk]
    spec.capacityDisks = capacity_disks
    # All capacity disks must be either ssd or non-ssd (mixed disks are not
    # supported)
    spec.creationType = "allFlash" if getattr(capacity_disks[0],
                                              "ssd") else "hybrid"
    spec.host = host_ref
    try:
        task = vsan_disk_mgmt_system.InitializeDiskMappings(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError(
            "Not enough permissions. Required privilege: {}".format(
                exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.fault.MethodNotFound as exc:
        log.exception(exc)
        raise VMwareRuntimeError("Method '{}' not found".format(exc.method))
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    _wait_for_tasks([task], service_instance)
    return True
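
A hypothetical call site for create_diskgroup is sketched below; every helper name and credential in it is an illustrative assumption, not part of the snippet above:

from pyVmomi import vim

import salt.utils.vmware
import salt.utils.vsan

# Connect to vCenter (placeholder credentials) and resolve the target host.
si = salt.utils.vmware.get_service_instance(
    host="vcenter.example.com",
    username="administrator@vsphere.local",
    password="secret",
    protocol="https",
    port=443,
)
host_ref = salt.utils.vmware.get_hosts(si, host_names=["esx1.example.com"])[0]
vsan_dm_system = salt.utils.vsan.get_vsan_disk_management_system(si)

# Pick one SSD as cache and the rest of the local SCSI disks as capacity
# (real code should filter for vSAN eligibility first).
scsi_disks = [
    lun for lun in
    host_ref.configManager.storageSystem.storageDeviceInfo.scsiLun
    if isinstance(lun, vim.HostScsiDisk)
]
cache_disk = next(d for d in scsi_disks if d.ssd)
capacity_disks = [d for d in scsi_disks if d is not cache_disk]

# create_diskgroup is the function from the snippet above.
create_diskgroup(si, vsan_dm_system, host_ref, cache_disk, capacity_disks)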
Example No. 3
def main():
    server = 'vcenter'
    si = vc_connect(server)
    atexit.register(connect.Disconnect, si)
    dc = si.content.rootFolder.childEntity[0]  # first datacenter
    content = si.RetrieveContent()
    esxi_host_name = 'esxi_host'
    esxihost = get_obj(content, [vim.HostSystem], esxi_host_name)
    if esxihost is None:
        print(f'Failed to find {esxi_host_name} in datacenter {dc.name}')
    else:
        # https://www.tachytelic.net/2014/03/posix-size-converter/
        # block-to-GB conversion: number of 512-byte blocks / 2 / 1024 / 1024
        # (e.g. 629145600 blocks -> 300 GB)
        diskmap = {esxihost: {'cache': [], 'capacity': []}}
        cacheDisks = []
        capacityDisks = []
        result = esxihost.configManager.vsanSystem.QueryDisksForVsan()
        ssds = []
        for res in result:
            if res.state == 'eligible' and (
                    res.disk.capacity.block) / 2 / 1024 / 1024 > 300:
                ssds.append(res.disk)
        # https://bit.ly/37lvGc3  vSAN SDKs Programming Guide
        if ssds:
            smallerSize = min([
                disk.capacity.block * disk.capacity.blockSize for disk in ssds
            ])
            for ssd in ssds:
                size = ssd.capacity.block * ssd.capacity.blockSize
                if size == smallerSize:
                    diskmap[esxihost]['cache'].append(ssd)
                    cacheDisks.append((ssd.displayName, size, esxihost.name))
                else:
                    diskmap[esxihost]['capacity'].append(ssd)
                    capacityDisks.append(
                        (ssd.displayName, size, esxihost.name))

            context = ssl.create_default_context()
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
            # next two lines from https://github.com/storage-code/vsanDeploy/blob/master/vsanDeploy.py
            vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)
            vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system']
            # Build one creation spec per qualifying host and queue its task
            # inside the loop, so nothing is submitted when no host qualifies.
            tasks = []
            for host, disks in diskmap.items():
                if disks['cache'] and disks['capacity']:
                    dm = vim.VimVsanHostDiskMappingCreationSpec(
                        cacheDisks=disks['cache'],
                        capacityDisks=disks['capacity'],
                        creationType='allFlash',
                        host=host)
                    task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm)
                    tasks.append(task)
            vsanapiutils.WaitForTasks(tasks, si)
        else:
            print('no disks to add')
            exit()
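
This script assumes vc_connect and get_obj helpers plus a few imports. A minimal sketch of those pieces follows; the credentials and the get_obj implementation are assumptions based on common pyVmomi sample utilities:

# Assumed imports and helpers for the script above (illustrative only).
import atexit
import ssl

from pyVim import connect
from pyVmomi import vim
import vsanapiutils


def vc_connect(server):
    # Disable certificate verification -- acceptable for a lab, not for
    # production.
    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    return connect.SmartConnect(host=server,
                                user='administrator@vsphere.local',
                                pwd='secret',
                                sslContext=context)


def get_obj(content, vimtype, name):
    # Walk the inventory with a container view and return the first object
    # whose name matches.
    view = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    try:
        for obj in view.view:
            if obj.name == name:
                return obj
    finally:
        view.DestroyView()
    return None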
Example No. 4
for host in hosts:
    ssds = [
        result.disk for result in hostProps[host]
        ['configManager.vsanSystem'].QueryDisksForVsan()
        if result.state == 'eligible' and result.disk.ssd
    ]
    smallerSize = min(
        [disk.capacity.block * disk.capacity.blockSize for disk in ssds])
    for ssd in ssds:
        size = ssd.capacity.block * ssd.capacity.blockSize
        if size == smallerSize:
            diskmap[host]['cache'].append(ssd)
            cacheDisks.append(
                (ssd.displayName, sizeof_fmt(size), hostProps[host]['name']))
        else:
            diskmap[host]['capacity'].append(ssd)
            capacityDisks.append(
                (ssd.displayName, sizeof_fmt(size), hostProps[host]['name']))

tasks = []
for host, disks in diskmap.items():
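    # Each cache disk anchors its own disk group, so claim no more cache
    # disks than there are capacity disks to back them.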
    if len(disks['cache']) > len(disks['capacity']):
        disks['cache'] = disks['cache'][:len(disks['capacity'])]
    try:
        dm = vim.VimVsanHostDiskMappingCreationSpec(
            cacheDisks=disks['cache'],
            capacityDisks=disks['capacity'],
            creationType=deploy_type,
            host=host)
        task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm)
        tasks.append(task)
    except Exception as exc:
        print('Some vSAN claim error... check vSAN:', exc)
Example No. 5
def main():
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))

    # For Python 2.7.9 and later, the default SSL context uses stricter
    # connection handshaking rules. We may need to turn off hostname checking
    # and client-side certificate verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port),
                      sslContext=context)

    atexit.register(Disconnect, si)

    cluster = getClusterInstance(args.clusterName, si)

    if args.vsanlicense:
        print('Assign VSAN license')
        lm = si.content.licenseManager
        lam = lm.licenseAssignmentManager
        lam.UpdateAssignedLicense(entity=cluster._moId,
                                  licenseKey=args.vsanlicense)

    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context)

    vsanClusterSystem = vcMos['vsan-cluster-config-system']
    vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system']

    isallFlash = args.allflash

    print('Enable VSAN with {} mode'.format(
        'all flash' if isallFlash else 'hybrid'))

    hostProps = CollectMultiple(
        si.content, cluster.host,
        ['name', 'configManager.vsanSystem', 'configManager.storageSystem'])
    hosts = hostProps.keys()

    for host in hosts:
        disks = [
            result.disk for result in hostProps[host]
            ['configManager.vsanSystem'].QueryDisksForVsan()
            if result.state == 'ineligible'
        ]
        print('Found ineligible disks {} in host {}'.format(
            [disk.displayName for disk in disks], hostProps[host]['name']))
        for disk in disks:
            if yes('Do you want to wipe disk {}?\nPlease always check the partition table and the data stored'
                   ' on those disks before doing any wipe! (yes/no)?'.format(
                       disk.displayName)):
                hostProps[host][
                    'configManager.storageSystem'].UpdateDiskPartitions(
                        disk.deviceName, vim.HostDiskPartitionSpec())

    tasks = []

    configInfo = vim.VsanHostConfigInfo(
        networkInfo=vim.VsanHostConfigInfoNetworkInfo(port=[
            vim.VsanHostConfigInfoNetworkInfoPortConfig(
                device=args.vmknic,
                ipConfig=vim.VsanHostIpConfig(upstreamIpAddress='224.1.2.3',
                                              downstreamIpAddress='224.2.3.4'))
        ]))

    for host in hosts:
        print('Enable VSAN traffic in host {} with {}'.format(
            hostProps[host]['name'], args.vmknic))
        task = hostProps[host]['configManager.vsanSystem'].UpdateVsan_Task(
            configInfo)
        tasks.append(task)
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print('Enable VSAN by claiming disks manually')
    # Build vsanReconfigSpec step by step; it only takes effect once
    # VsanClusterReconfig is called.
    vsanReconfigSpec = vim.VimVsanReconfigSpec(
        modify=True,
        vsanClusterConfig=vim.VsanClusterConfigInfo(
            enabled=True,
            defaultConfig=vim.VsanClusterConfigInfoHostDefaultInfo(
                autoClaimStorage=False)))

    if isallFlash:
        print('Enable deduplication and compression for VSAN')
        vsanReconfigSpec.dataEfficiencyConfig = vim.VsanDataEfficiencyConfig(
            compressionEnabled=True, dedupEnabled=True)

    if args.faultdomains:
        print('Add fault domains in VSAN')
        faultDomains = []
        # args.faultdomains is a string like "f1:host1,host2 f2:host3,host4"
        for faultdomain in args.faultdomains.split():
            fname, hostnames = faultdomain.split(':')
            domainSpec = vim.cluster.VsanFaultDomainSpec(
                name=fname,
                hosts=[
                    host for host in hosts
                    if hostProps[host]['name'] in hostnames.split(',')
                ])
            faultDomains.append(domainSpec)

        vsanReconfigSpec.faultDomainsSpec = vim.VimClusterVsanFaultDomainsConfigSpec(
            faultDomains=faultDomains)

    task = vsanClusterSystem.VsanClusterReconfig(cluster, vsanReconfigSpec)
    vsanapiutils.WaitForTasks([task], si)

    diskmap = {host: {'cache': [], 'capacity': []} for host in hosts}
    cacheDisks = []
    capacityDisks = []

    if isallFlash:
        #Get eligible ssd from host
        for host in hosts:
            ssds = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible' and result.disk.ssd
            ]
            smallerSize = min([
                disk.capacity.block * disk.capacity.blockSize for disk in ssds
            ])
            for ssd in ssds:
                size = ssd.capacity.block * ssd.capacity.blockSize
                if size == smallerSize:
                    diskmap[host]['cache'].append(ssd)
                    cacheDisks.append((ssd.displayName, sizeof_fmt(size),
                                       hostProps[host]['name']))
                else:
                    diskmap[host]['capacity'].append(ssd)
                    capacityDisks.append((ssd.displayName, sizeof_fmt(size),
                                          hostProps[host]['name']))
    else:
        for host in hosts:
            disks = [
                result.disk for result in hostProps[host]
                ['configManager.vsanSystem'].QueryDisksForVsan()
                if result.state == 'eligible'
            ]
            ssds = [disk for disk in disks if disk.ssd]
            hdds = [disk for disk in disks if not disk.ssd]

            for disk in ssds:
                diskmap[host]['cache'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                cacheDisks.append((disk.displayName, sizeof_fmt(size),
                                   hostProps[host]['name']))
            for disk in hdds:
                diskmap[host]['capacity'].append(disk)
                size = disk.capacity.block * disk.capacity.blockSize
                capacityDisks.append((disk.displayName, sizeof_fmt(size),
                                      hostProps[host]['name']))

    print('Claim these disks as cache disks')
    for disk in cacheDisks:
        print('Name: {}, Size: {}, Host: {}'.format(disk[0], disk[1], disk[2]))

    print('Claim these disks as capacity disks')
    for disk in capacityDisks:
        print('Name: {}, Size: {}, Host: {}'.format(disk[0], disk[1], disk[2]))

    for host, disks in diskmap.items():
        if disks['cache'] and disks['capacity']:
            dm = vim.VimVsanHostDiskMappingCreationSpec(
                cacheDisks=disks['cache'],
                capacityDisks=disks['capacity'],
                creationType='allFlash' if isallFlash else 'hybrid',
                host=host)

            task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm)
            tasks.append(task)

    print('Wait for the disk group creation tasks to finish')
    vsanapiutils.WaitForTasks(tasks, si)
    del tasks[:]

    print('Display disk groups in each host')
    for host in hosts:
        diskMaps = vsanVcDiskManagementSystem.QueryDiskMappings(host)

        for index, diskMap in enumerate(diskMaps, 1):
            print('Host:{}, DiskGroup:{}, Cache Disks:{}, Capacity Disks:{}'.format(
                hostProps[host]['name'], index,
                diskMap.mapping.ssd.displayName,
                [disk.displayName for disk in diskMap.mapping.nonSsd]))

    print('Enable perf service on this cluster')
    vsanPerfSystem = vcMos['vsan-performance-manager']
    task = vsanPerfSystem.CreateStatsObjectTask(cluster)
    vsanapiutils.WaitForTasks([task], si)
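
The script also depends on helpers such as GetArgs, CollectMultiple, yes, and getClusterInstance from the vSAN SDK sample utilities. As one illustration, the cluster lookup might be written like this (an assumption modeled on those samples):

def getClusterInstance(clusterName, serviceInstance):
    # Search each datacenter's host folder for a cluster with the given name.
    content = serviceInstance.RetrieveContent()
    searchIndex = content.searchIndex
    for datacenter in content.rootFolder.childEntity:
        cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName)
        if cluster is not None:
            return cluster
    return None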
Example No. 6
for host in hosts:
    ssds = [
        result.disk for result in hostProps[host]
        ['configManager.vsanSystem'].QueryDisksForVsan()
        if result.state == 'eligible' and result.disk.ssd
    ]
    smallerSize = min(
        [disk.capacity.block * disk.capacity.blockSize for disk in ssds])
    for ssd in ssds:
        size = ssd.capacity.block * ssd.capacity.blockSize
        if size == smallerSize:
            diskmap[host]['cache'].append(ssd)
            cacheDisks.append(
                (ssd.displayName, sizeof_fmt(size), hostProps[host]['name']))
        else:
            diskmap[host]['capacity'].append(ssd)
            capacityDisks.append(
                (ssd.displayName, sizeof_fmt(size), hostProps[host]['name']))

tasks = []
for host, disks in diskmap.items():
    if len(disks['cache']) > len(disks['capacity']):
        disks['cache'] = disks['cache'][:len(disks['capacity'])]

    dm = vim.VimVsanHostDiskMappingCreationSpec(
        cacheDisks=disks['cache'],
        capacityDisks=disks['capacity'],
        creationType='allFlash',
        host=host)

    task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm)
    tasks.append(task)
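
The queued tasks still need to be awaited, as Example No. 5 does; assuming the same si service instance and vsanapiutils import from the surrounding script:

# Block until all disk group creation tasks complete.
vsanapiutils.WaitForTasks(tasks, si)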