Example #1
 def __exit__(self, *args):
     Disconnect(self.client.si)
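Only the teardown half of the context manager is shown above. A minimal sketch of the full pairing, assuming pyVmomi's SmartConnect/Disconnect API (the class name and attribute layout here are illustrative, not the original author's):

import ssl
from pyVim.connect import SmartConnect, Disconnect

class VCenterSession:
    """Illustrative context manager pairing SmartConnect with Disconnect."""
    def __init__(self, host, user, pwd):
        self.host, self.user, self.pwd = host, user, pwd
        self.si = None

    def __enter__(self):
        # Unverified SSL context: acceptable for labs, not production.
        context = ssl._create_unverified_context()
        self.si = SmartConnect(host=self.host, user=self.user,
                               pwd=self.pwd, sslContext=context)
        return self.si

    def __exit__(self, *args):
        Disconnect(self.si)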
Example #2
    customspec = vim.vm.customization.Specification(nicSettingMap=[adaptermap], globalIPSettings=globalip,
                                                    identity=ident)
    # config = get_vmconfig(content,1,1024,template,40)
    relospec = vim.vm.RelocateSpec()
    relospec.datastore = datastore
    relospec.pool = resource_pool
    clonespec = vim.vm.CloneSpec(powerOn=power_on, template=False, location=relospec, customization=customspec, config=specconfig)

    print("cloning VM...")
    # print(template.parent)
    task = template.Clone(folder=destfolder, name=vm_name, spec=clonespec)
    wait_for_task(task)


    # vm = get_obj(content, [vim.VirtualMachine], vm_name)
    # vmtask = vm.ReconfigVM_Task(spec=spec)
    # wait_for_task(vmtask)


if __name__ == '__main__':
    service_instance = connect_vc(host="", user="", pwd="")
    content = service_instance.RetrieveContent()
    clone_vm(content=content, template='CentOS7-templates', vm_name='clone_vm_test3',
             datacenter_name='DataCenter', vm_folder='', datastore_name='Datastore',
             resource_pool='Resources', power_on=False, numcpu=2, mensize=4096, ipaddr="192.168.1.16",
             subnetmask="255.255.255.0", gateway="192.168.1.1", dnsdomain="localhost", newvmhostname="clonevmtest",
             dnsServerList=['223.5.5.5', '114.114.114.114'])
    Disconnect(service_instance)
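The `wait_for_task` and `connect_vc` helpers used above are not shown in this fragment. A common minimal `wait_for_task`, polling the task state via pyVmomi's `vim.TaskInfo` (a sketch, not the author's exact helper):

import time
from pyVmomi import vim

def wait_for_task(task):
    # Block until vCenter reports the task finished, then surface errors.
    while task.info.state not in (vim.TaskInfo.State.success,
                                  vim.TaskInfo.State.error):
        time.sleep(1)
    if task.info.state == vim.TaskInfo.State.error:
        raise task.info.error
    return task.info.result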


Example #3
def main():
    supportedArgs = [
        (["P:",
          "primary host="], "localhost", "Primary host name", "primaryHost"),
        (["S:", "secondary host="], "localhost", "Secondary host name",
         "secondaryHost"),
        (["d:", "shared datastore name="], "storage1", "shared datastore name",
         "dsName"),
        (["k:", "keep="], "0", "Keep configs", "keep"),
        (["u:", "user="******"root", "User name", "user"),
        (["p:", "pwd="], "", "Password", "pwd"),
        (["v:", "vmname="], "vmFT", "Name of the virtual machine", "vmname"),
        (["i:", "numiter="], "1", "Number of iterations", "iter"),
        (["t:",
          "FT type="], "up", "Type of fault tolerance [up|smp]", "ftType"),
    ]
    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage") == True:
        args.Usage()
        sys.exit(0)

    # Process command line
    vmname = args.GetKeyValue("vmname")
    numiter = int(args.GetKeyValue("iter"))
    keep = int(args.GetKeyValue("keep"))
    dsName = args.GetKeyValue("dsName")
    primaryHost = args.GetKeyValue("primaryHost")
    secondaryHost = args.GetKeyValue("secondaryHost")
    ftType = args.GetKeyValue("ftType")

    numCPU = 2 if ftType == "smp" else 1
    memSize = 64

    for i in range(numiter):
        primaryVm = None
        primarySi = None
        secondarySi = None
        try:
            # Connect to primary host
            primarySi = SmartConnect(host=primaryHost,
                                     user=args.GetKeyValue("user"),
                                     pwd=args.GetKeyValue("pwd"))
            Log("Connected to Primary host")

            # Cleanup from previous runs
            try:
                CleanupVm(vmname)
            except vim.fault.InvalidOperationOnSecondaryVm:
                pass

            # Connect to secondary host
            secondarySi = SmartConnect(host=secondaryHost,
                                       user=args.GetKeyValue("user"),
                                       pwd=args.GetKeyValue("pwd"))
            Log("Connected to Secondary host")

            for si in [primarySi, secondarySi]:
                if len(FindNicType(si, ftLoggingNicType)) == 0:
                    SelectVnic(si, "vmk0", ftLoggingNicType)
                if len(FindNicType(si, vmotionNicType)) == 0:
                    SelectVnic(si, "vmk0", vmotionNicType)

            ftMgrDst = host.GetFaultToleranceMgr(secondarySi)

            # Cleanup from previous runs
            CleanupVm(vmname)
            CleanupVm(vmname, True)

            connect.SetSi(primarySi)
            CleanupDir(dsName, vmname)
            if ftType == "smp":
                CleanupDir(dsName, "%s_shared" % vmname)

            # Create new VM
            Log("Creating primary VM " + vmname)
            primaryVm = vm.CreateQuickDummy(vmname,
                                            guest="winNetEnterpriseGuest",
                                            numScsiDisks=2,
                                            scrubDisks=True,
                                            memory=memSize,
                                            datastoreName=dsName)
            primaryUuid = primaryVm.GetConfig().GetInstanceUuid()
            primaryCfgPath = primaryVm.GetConfig().GetFiles().GetVmPathName()
            primaryDir = primaryCfgPath[:primaryCfgPath.rfind("/")]

            ftMetadataDir = GetSharedPath(primarySi, primaryVm)

            Log("Using VM : " + primaryVm.GetName() + " with instanceUuid " +
                primaryUuid)

            cSpec = vim.vm.ConfigSpec()
            if ftType != "smp":
                # Enable record/replay for the primaryVm
                # See PR 200254
                flags = vim.vm.FlagInfo(recordReplayEnabled=True)
                cSpec.SetFlags(flags)
                task = primaryVm.Reconfigure(cSpec)
                WaitForTask(task)
                Log("Enabled record/replay for Primary VM.")
                CheckFTState(
                    primaryVm,
                    vim.VirtualMachine.FaultToleranceState.notConfigured)
            else:
                cSpec.files = vim.vm.FileInfo(
                    ftMetadataDirectory=ftMetadataDir)
                cSpec.numCPUs = numCPU
                task = primaryVm.Reconfigure(cSpec)
                WaitForTask(task)

            # Create secondary VM
            connect.SetSi(secondarySi)
            Log("Creating secondary VM " + vmname)
            secondaryVm = vm.CreateQuickSecondary(vmname,
                                                  primaryVm,
                                                  ftType=ftType,
                                                  scrubDisks=True,
                                                  numScsiDisks=2,
                                                  datastoreName=dsName,
                                                  ftMetadataDir=ftMetadataDir)
            if secondaryVm is None:
                raise Exception("Secondary VM creation failed")
            secondaryUuid = secondaryVm.GetConfig().GetInstanceUuid()
            secondaryCfgPath = secondaryVm.GetConfig().GetFiles(
            ).GetVmPathName()
            Log("Created secondary VM " + secondaryVm.GetName())
            Log("Secondry VM: instanceUuid " + secondaryUuid)
            Log("Secondary cfg path: " + secondaryCfgPath)

            ##  Configure some additional config variables needed for FT
            ##  This should eventually be done automatically at FT Vmotion time
            Log("Setting up extra config settings for the primary VM...")

            cSpec = vim.vm.ConfigSpec()
            extraCfgs = []
            if ftType == "smp":  # some of these options are temporary
                cSpec.flags = vim.vm.FlagInfo(
                    faultToleranceType=FTType.checkpointing)
                AddExtraConfig(extraCfgs, "ftcpt.maxDiskBufferPages", "0")
                AddExtraConfig(extraCfgs, "sched.mem.pshare.enable", "FALSE")
                AddExtraConfig(extraCfgs, "sched.mem.fullreservation", "TRUE")
                AddExtraConfig(extraCfgs,
                               "monitor_control.disable_mmu_largepages",
                               "TRUE")
                AddExtraConfig(extraCfgs, "sched.mem.min", memSize)
                AddExtraConfig(extraCfgs, "migration.dataTimeout", "2000")
                cSpec.files = vim.vm.FileInfo(
                    ftMetadataDirectory=ftMetadataDir)
            else:
                cSpec.flags = vim.vm.FlagInfo(
                    faultToleranceType=FTType.recordReplay)
                AddExtraConfig(extraCfgs, "replay.allowBTOnly", "TRUE")

            cSpec.SetExtraConfig(extraCfgs)
            WaitForTask(primaryVm.Reconfigure(cSpec))

            # Register secondary VM
            Log("Register secondary VM with the primary")
            ftMgr = host.GetFaultToleranceMgr(primarySi)
            connect.SetSi(primarySi)
            task = ftMgr.RegisterSecondary(primaryVm, secondaryUuid,
                                           secondaryCfgPath)
            WaitForTask(task)
            Log("Secondary VM registered successfully")

            # Verify FT role & state
            CheckFTRole(primaryVm, 1)
            CheckFTState(primaryVm,
                         vim.VirtualMachine.FaultToleranceState.enabled)

            Log("FT configured successfully.")

            # PowerOn FT VM
            Log("Powering on Primary VM")
            vm.PowerOn(primaryVm)
            if ftType == "smp":  # some of these options are temporary
                task = primaryVm.CreateSnapshot("snap-early",
                                                "before secondary starts",
                                                memory=False,
                                                quiesce=True)
                WaitForTask(task)

            # Perform the FT VMotion
            Log("Calling StartSecondary on remote host...")
            primaryThumbprint = GetHostThumbprint(primaryHost)
            secondaryThumbprint = GetHostThumbprint(secondaryHost)
            Log("Primary thumbprint: %s" % primaryThumbprint)
            Log("Secondary thumbprint: %s" % secondaryThumbprint)

            secondaryHostSystem = secondarySi.content.rootFolder.childEntity[
                0].hostFolder.childEntity[0].host[0]
            sslThumbprintInfo = vim.host.SslThumbprintInfo(
                ownerTag='hostd-test', principal='vpxuser')
            sslThumbprintInfo.sslThumbprints = [primaryThumbprint]
            secondaryHostSystem.UpdateSslThumbprintInfo(
                sslThumbprintInfo, "add")

            sslThumbprintInfo.sslThumbprints = [secondaryThumbprint]
            primaryHostSystem = primarySi.content.rootFolder.childEntity[
                0].hostFolder.childEntity[0].host[0]
            primaryHostSystem.UpdateSslThumbprintInfo(sslThumbprintInfo, "add")

            task = ftMgr.StartSecondaryOnRemoteHost(primaryVm,
                                                    secondaryCfgPath,
                                                    secondaryHost, 80,
                                                    secondaryThumbprint)
            WaitForTask(task)
            Log("Start secondary done.")

            if ftType == "smp":
                # Verify snapshot is gone
                if primaryVm.snapshot is not None:
                    raise Exception("Snapshot still exists on primary")

                task = primaryVm.CreateSnapshot("snap",
                                                "without memory snapshot",
                                                memory=False,
                                                quiesce=True)
                WaitForTask(task)

                if not primaryVm.snapshot or not primaryVm.snapshot.currentSnapshot:
                    raise Exception("Snapshot was not created")
                else:
                    Log("Snapshot %s exists as expected" %
                        primaryVm.snapshot.currentSnapshot)

            # Retrieve reference to new secondary VM
            connect.SetSi(secondarySi)
            secondaryVm = folder.FindCfg(secondaryCfgPath)
            connect.SetSi(primarySi)

            # FT state check
            CheckFTState(primaryVm,
                         vim.VirtualMachine.FaultToleranceState.running)
            CheckFTState(secondaryVm,
                         vim.VirtualMachine.FaultToleranceState.running)

            Log("Start secondary done.")

            # allows some time for FT to run and checkpoint before failing
            # over. This seems more necessary on nested VM environments
            # than physical
            time.sleep(20)

            Log("Failing over to the secondary.")
            WaitForTask(ftMgr.MakePrimary(primaryVm, secondaryUuid))
            WaitForPowerState(primaryVm, primarySi,
                              vim.VirtualMachine.PowerState.poweredOff)
            Log("Verified primary power state is off.")
            WaitForFTState(secondaryVm, FTState.needSecondary)

            Log("Starting secondary.")
            task = ftMgrDst.StartSecondaryOnRemoteHost(secondaryVm,
                                                       primaryCfgPath,
                                                       primaryHost, 80,
                                                       primaryThumbprint)
            WaitForTask(task)

            # Verify snapshot is gone
            if primaryVm.snapshot is not None:
                raise Exception("Snapshot still exists on old primary")

            Log("Failing over to the old-primary.")
            WaitForTask(ftMgrDst.MakePrimary(secondaryVm, secondaryUuid))
            WaitForPowerState(secondaryVm, secondarySi,
                              vim.VirtualMachine.PowerState.poweredOff)
            Log("Verified primary power state is off.")
            WaitForFTState(primaryVm, FTState.needSecondary)

            task = ftMgr.StartSecondaryOnRemoteHost(primaryVm,
                                                    secondaryCfgPath,
                                                    secondaryHost, 80,
                                                    secondaryThumbprint)
            WaitForTask(task)

            # PowerOff FT VMs
            Log("Power off Primary VM")
            vm.PowerOff(primaryVm)
            connect.SetSi(secondarySi)
            for i in range(10):
                if secondaryVm.GetRuntime().GetPowerState(
                ) == vim.VirtualMachine.PowerState.poweredOn:
                    time.sleep(1)
            if secondaryVm.GetRuntime().GetPowerState(
            ) == vim.VirtualMachine.PowerState.poweredOn:
                raise Exception("Secondary VM is still powered on!")
            Log("Verified secondary power state.")

            Log("Unregistering secondary VM " + vmname)
            ftMgrDst.Unregister(secondaryVm)

            # Cleanup
            if not keep:
                connect.SetSi(primarySi)
                CleanupVm(vmname)
                CleanupDir(dsName, vmname)
                if ftType == "smp":
                    CleanupDir(dsName, "%s_shared" % vmname)

                connect.SetSi(secondarySi)
                CleanupVm(vmname, True)
        except Exception as e:
            Log("Caught exception : %s" % e)
            stackTrace = " ".join(
                traceback.format_exception(sys.exc_info()[0],
                                           sys.exc_info()[1],
                                           sys.exc_info()[2]))
            Log(stackTrace)
            global status
            status = "FAIL"
            Disconnect(primarySi)
            Disconnect(secondarySi)
            return

        Disconnect(primarySi)
        Disconnect(secondarySi)
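`GetHostThumbprint` above is an internal test helper not shown here; the thumbprint format the FT APIs expect is the SHA-1 digest of the host's SSL certificate, colon-separated. A hedged sketch of computing it with only the standard library:

import hashlib
import ssl

def get_host_thumbprint(host, port=443):
    # SHA-1 thumbprint of the host's SSL certificate in the
    # colon-separated form StartSecondaryOnRemoteHost expects
    # (a sketch; the script's own helper is not shown).
    pem = ssl.get_server_certificate((host, port))
    der = ssl.PEM_cert_to_DER_cert(pem)
    digest = hashlib.sha1(der).hexdigest().upper()
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))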
Example #4
def ext_pillar(
        minion_id,
        pillar,  # pylint: disable=W0613
        **kwargs):
    '''
    Check vmware/vcenter for all data
    '''
    vmware_pillar = {}
    host = None
    username = None
    password = None
    property_types = []
    property_name = 'name'
    protocol = None
    port = None
    pillar_key = 'vmware'
    replace_default_attributes = False
    type_specific_pillar_attributes = {
        'VirtualMachine': [
            {
                'config': [
                    'version',
                    'guestId',
                    'files',
                    'tools',
                    'flags',
                    'memoryHotAddEnabled',
                    'cpuHotAddEnabled',
                    'cpuHotRemoveEnabled',
                    'datastoreUrl',
                    'swapPlacement',
                    'bootOptions',
                    'scheduledHardwareUpgradeInfo',
                    'memoryAllocation',
                    'cpuAllocation',
                ]
            },
            {
                'summary': [
                    {
                        'runtime': [
                            {
                                'host': [
                                    'name',
                                    {
                                        'parent': 'name'
                                    },
                                ]
                            },
                            'bootTime',
                        ]
                    },
                    {
                        'guest': [
                            'toolsStatus',
                            'toolsVersionStatus',
                            'toolsVersionStatus2',
                            'toolsRunningStatus',
                        ]
                    },
                    {
                        'config': [
                            'cpuReservation',
                            'memoryReservation',
                        ]
                    },
                    {
                        'storage': [
                            'committed',
                            'uncommitted',
                            'unshared',
                        ]
                    },
                    {
                        'dasVmProtection': ['dasProtected']
                    },
                ]
            },
            {
                'storage': [{
                    'perDatastoreUsage': [
                        {
                            'datastore': 'name'
                        },
                        'committed',
                        'uncommitted',
                        'unshared',
                    ]
                }]
            },
        ],
        'HostSystem': [
            {
                'datastore': [
                    'name', 'overallStatus', {
                        'summary': [
                            'url',
                            'freeSpace',
                            'maxFileSize',
                            'maxVirtualDiskCapacity',
                            'maxPhysicalRDMFileSize',
                            'maxVirtualRDMFileSize',
                            {
                                'vmfs': [
                                    'capacity',
                                    'blockSizeMb',
                                    'maxBlocks',
                                    'majorVersion',
                                    'version',
                                    'uuid',
                                    {
                                        'extent': [
                                            'diskName',
                                            'partition',
                                        ]
                                    },
                                    'vmfsUpgradeable',
                                    'ssd',
                                    'local',
                                ],
                            },
                        ],
                    }, {
                        'vm': 'name'
                    }
                ]
            },
            {
                'vm': [
                    'name',
                    'overallStatus',
                    {
                        'summary': [
                            {
                                'runtime': 'powerState'
                            },
                        ]
                    },
                ]
            },
        ]
    }
    pillar_attributes = [
        {
            'summary': ['overallStatus']
        },
        {
            'network': [
                'name',
                {
                    'config': {
                        'distributedVirtualSwitch': 'name'
                    }
                },
            ]
        },
        {
            'datastore': [
                'name',
            ]
        },
        {
            'parent': ['name']
        },
    ]

    if 'pillar_key' in kwargs:
        pillar_key = kwargs['pillar_key']
    vmware_pillar[pillar_key] = {}

    if 'host' not in kwargs:
        log.error(
            'VMWare external pillar configured but host is not specified in ext_pillar configuration.'
        )
        return vmware_pillar
    else:
        host = kwargs['host']
        log.debug('vmware_pillar -- host = %s', host)

    if 'username' not in kwargs:
        log.error(
            'VMWare external pillar requested but username is not specified in ext_pillar configuration.'
        )
        return vmware_pillar
    else:
        username = kwargs['username']
        log.debug('vmware_pillar -- username = %s', username)

    if 'password' not in kwargs:
        log.error(
            'VMWare external pillar requested but password is not specified in ext_pillar configuration.'
        )
        return vmware_pillar
    else:
        password = kwargs['password']
        log.debug('vmware_pillar -- password = %s', password)

    if 'replace_default_attributes' in kwargs:
        replace_default_attributes = kwargs['replace_default_attributes']
        if replace_default_attributes:
            pillar_attributes = []
            type_specific_pillar_attributes = {}

    if 'property_types' in kwargs:
        for prop_type in kwargs['property_types']:
            if isinstance(prop_type, dict):
                prop_type_key = next(iter(prop_type))
                property_types.append(getattr(vim, prop_type_key))
                if isinstance(prop_type[prop_type_key], list):
                    pillar_attributes = pillar_attributes + prop_type[prop_type_key]
                else:
                    log.warning(
                        'A property_type dict was specified, but its value is not a list'
                    )
            else:
                property_types.append(getattr(vim, prop_type))
    else:
        property_types = [vim.VirtualMachine]
    log.debug('vmware_pillar -- property_types = %s', property_types)

    if 'property_name' in kwargs:
        property_name = kwargs['property_name']
    else:
        property_name = 'name'
    log.debug('vmware_pillar -- property_name = %s', property_name)

    if 'protocol' in kwargs:
        protocol = kwargs['protocol']
        log.debug('vmware_pillar -- protocol = %s', protocol)

    if 'port' in kwargs:
        port = kwargs['port']
        log.debug('vmware_pillar -- port = %s', port)

    virtualgrain = None
    osgrain = None
    if 'virtual' in __grains__:
        virtualgrain = __grains__['virtual'].lower()
    if 'os' in __grains__:
        osgrain = __grains__['os'].lower()

    if virtualgrain == 'vmware' or osgrain == 'vmware esxi' or osgrain == 'esxi':
        vmware_pillar[pillar_key] = {}
        try:
            _conn = salt.utils.vmware.get_service_instance(
                host, username, password, protocol, port)
            if _conn:
                data = None
                for prop_type in property_types:
                    data = salt.utils.vmware.get_mor_by_property(
                        _conn,
                        prop_type,
                        minion_id,
                        property_name=property_name)
                    if data:
                        type_name = type(data).__name__.replace('vim.', '')
                        if hasattr(data, 'availableField'):
                            vmware_pillar[pillar_key]['annotations'] = {}
                            for availableField in data.availableField:
                                for customValue in data.customValue:
                                    if availableField.key == customValue.key:
                                        vmware_pillar[pillar_key][
                                            'annotations'][
                                                availableField.
                                                name] = customValue.value
                        type_specific_pillar_attribute = []
                        if type_name in type_specific_pillar_attributes:
                            type_specific_pillar_attribute = type_specific_pillar_attributes[
                                type_name]
                        vmware_pillar[pillar_key] = dictupdate.update(
                            vmware_pillar[pillar_key],
                            _crawl_attribute(
                                data, pillar_attributes +
                                type_specific_pillar_attribute))
                        break
                # explicitly disconnect from vCenter when we are done, connections linger idle otherwise
                Disconnect(_conn)
            else:
                log.error(
                    'Unable to obtain a connection with %s, please verify '
                    'your vmware ext_pillar configuration', host)
        except RuntimeError:
            log.error(
                ('A runtime error occurred in the vmware_pillar, '
                 'this is likely caused by an infinite recursion in '
                 'a requested attribute.  Verify your requested attributes '
                 'and reconfigure the pillar.'))

        return vmware_pillar
    else:
        return {}
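For reference, a hedged sketch of invoking this pillar function directly with the kwargs it checks for; in practice Salt supplies these from the master's ext_pillar configuration, and every value below is a placeholder:

pillar_data = ext_pillar(
    'minion01',                      # minion_id to match on
    {},                              # existing pillar (unused here)
    host='vcenter.example.com',
    username='pillar_user',
    password='secret',
    property_types=['VirtualMachine'],
    property_name='name',
)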
Example #5
 def tearDown(self):
     """
     Reset test suite
     """
     Disconnect(self.si)
Example #6
 def close(self):
     Disconnect(self.connection)
Example #7
def get_service_instance(host,
                         username=None,
                         password=None,
                         protocol=None,
                         port=None,
                         mechanism='userpass',
                         principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''

    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if salt.utils.is_proxy() or (hasattr(
                stub, 'host') and stub.host != ':'.join([host, str(port)])):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host, username, password,
                                                 protocol, port, mechanism,
                                                 principal, domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host, username, password,
                                                 protocol, port, mechanism,
                                                 principal, domain)
    except vim.fault.VimFault as exc:
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
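A hedged usage sketch of the helper above; the host and credentials are placeholders, and the returned object is the same `vim.ServiceInstance` the other examples pass to `Disconnect`:

si = get_service_instance('vcenter.example.com',
                          username='root',
                          password='secret')
print(si.CurrentTime())   # cheap call that doubles as a session check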
Example #8
def main(x):

    # Main Program Loop.
    f = open("vms.csv", "r")
    data = f.read()
    f.close()

    apps = []
    for i in data.split("\n"):
        s = i[:-1].split(",")
        apps.append(s)

    length = len(apps)
    for i in range(1, length - 1):

        try:
            stname = "ST-" + apps[i][0]
            vmoid = getmoid(apps[i][2])
            description = apps[i][1].replace(" ", "")
            sectagid = getstag(stname, description)
            applytag(sectagid, vmoid)
            print("Tagging Successful | VM: " + apps[i][2] + " ID: " + vmoid +
                  " | SecTAG: " + stname)
        except Exception:
            print("** Tagging Process Failed | VM (" + apps[i][2] +
                  ") | May not be local to current set vcenter server **")

    tags = getallsectags()

    # Add in Security Group Cleanup methods here -> Query SG first.
    if x == 1:
        for i in tags:
            tagid, description = gettag(i)
            appid = i.split("-")[1]
            ASG = "SG-" + appid
            AppSection = "AppID-" + appid

            createsection(AppSection)
            conf, headers, code = getfwconfig(AppSection)
            secID = secid(conf)
            ETag = headers["ETag"]
            sgID = createsg(ASG, description, tagid)

            print("Generating Baseline Policies...")
            # Create Outbound Baseline Application Policy.
            sourceconf = createpolicies(secID, ETag,
                                        str(AppSection) + "-OUT", sgID,
                                        "source")
            postsection(sourceconf, ETag, secID)

            # Retrieve ETag update for next Policy Creation.
            conf, headers, code = getfwconfig(AppSection)
            secID = secid(conf)
            ETag = headers["ETag"]

            # Apply the Inbound Policy Set.
            destconf = createpolicies(secID, ETag,
                                      str(AppSection) + "-IN", sgID,
                                      "destination")
            postsection(destconf, ETag, secID)

            # Retrieve ETag update for moving the section.
            conf, headers, code = getfwconfig(AppSection)
            secID = secid(conf)
            ETag = headers["ETag"]

            # move section before the default Policy.
            movesection(AppSection, secID, ETag, conf)

    Disconnect(c)
Example #9
 def close(self):
     """Disconnect the current session from vCenter.
     """
     Disconnect(self.service_instance)
Example #10
 def __disconnect(self):
     Disconnect(self.server)
Example #11
 def disconnect(self) -> None:
     """
     Disconnect from vCenter. Does not matter if there is a connection.
     """
     if self.api is not None:
         Disconnect(self.api)
Example #12
def main():
    """
    function runs all of the other functions. Some parts of this function
    are taken from the getallvms.py script from the pyvmomi gihub repo
    """
    args = get_args()
    try:
        si = None
        try:
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            context.verify_mode = ssl.CERT_NONE
            si = SmartConnect(host=args.host,
                              user=args.user,
                              pwd=args.password,
                              port=int(args.port),
                              sslContext=context)
        except IOError as e:
            pass

        if not si:
            print("Could not connect to the specified host using "
                  "specified username and password")
            return -1

        atexit.register(Disconnect, si)

        content = si.RetrieveContent()
        datacenter = content.rootFolder.childEntity[0]
        datastores = datacenter.datastore
        vmfolder = datacenter.vmFolder
        vmlist = vmfolder.childEntity
        dsvmkey = []

        # each datastore found on ESXi host or vCenter is passed
        # to the find_vmx and examine_vmx functions to find all
        # VMX files and search them

        for ds in datastores:
            find_vmx(ds.browser, "[%s]" % ds.summary.name, datacenter.name,
                     ds.summary.name)
            examine_vmx(ds.summary.name)
            updatevmx_path()

        # each VM found in the inventory is passed to the getvm_info
        # function to get its instanceUuid

        for vm in vmlist:
            getvm_info(vm)

        # each key from the DS_VM hashtable is added to a separate
        # list for comparison later

        for a in DS_VM.keys():
            dsvmkey.append(a)

        # each uuid in the dsvmkey list is passed to the find_match
        # function to look for a match

        print "The following virtual machine(s) do not exist in the " \
              "inventory, but exist on a datastore " \
              "(Display Name, Datastore/Folder name):"
        for match in dsvmkey:
            find_match(match)
        Disconnect(si)
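This example builds its unverified SSL context by hand; other examples on this page use the shorter stdlib shortcut, which is equivalent in effect. Either way certificate verification is disabled, so keep this to lab use:

import ssl

# Equivalent to SSLContext(PROTOCOL_SSLv23) + CERT_NONE as used above.
context = ssl._create_unverified_context()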
Example #13
 def disconnect_from_vcenter(self, si):
     Disconnect(si)
Example #14
 def disconnect(self):
     if self._si:
         Disconnect(self._si)
Example #15
 def disconnect(self):
     if self.connection is None:
         print("no connection to close")
     else:
         print("closing connection")
         Disconnect(self.connection)
Example #16
def logout(si):  # pragma: no cover
    Disconnect(si)
Example #17
                 user=username,
                 pwd=password,
                 port=vcenter_port)

# Look up the MOID of the template VM to clone from
template_vm = vmutils.get_vm_by_name(s, template_name)

# This gets the MOID of the Guest Customization Spec that is saved in the vCenter DB
guest_customization_spec = s.content.customizationSpecManager.GetCustomizationSpec(
    name=customization_spec_name)

# This will retrieve the Cluster MOID
cluster = vmutils.get_cluster(s, cluster_name)

# This constructs the relocate spec needed in a later step by specifying the default resource pool (named "Resources") of the cluster
# Alternatively one can specify a custom resource pool inside of a cluster
relocate_spec = vim.vm.RelocateSpec(pool=cluster.resourcePool)

# This constructs the clone specification and adds the customization spec and location spec to it
cloneSpec = vim.vm.CloneSpec(powerOn=True,
                             template=False,
                             location=relocate_spec,
                             customization=guest_customization_spec.spec)

# Finally this is the clone operation with the relevant specs attached
clone = template_vm.Clone(name=new_vm_name,
                          folder=template_vm.parent,
                          spec=cloneSpec)

Disconnect(s)
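Note that `Clone` returns a `vim.Task`, and this example disconnects without waiting for it. If you need to block until the clone completes, a sketch using pyVim's task helper, placed before the final `Disconnect`:

from pyVim.task import WaitForTask

WaitForTask(clone)   # raises if the clone task ends in error
Disconnect(s)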
Example #18
def main():
    """
    Simple program for cloning a virtual machine.
    """

    # first we get the params from Ansible playbook
    fields = {
        "vmware_host": {
            "required": True,
            "type": "str"
        },
        "vmware_user": {
            "required": True,
            "type": "str"
        },
        "vmware_pwd": {
            "required": True,
            "type": "str"
        },
        "vmware_port": {
            "required": True,
            "type": "str"
        },
        "vmware_dest_vm": {
            "required": True,
            "type": "str"
        },
    }
    ansiblemodule = AnsibleModule(argument_spec=fields)

    context = None
    if hasattr(ssl, '_create_unverified_context'):
        context = ssl._create_unverified_context()

    # now we connect to VMware with the info we received
    si = SmartConnect(host=ansiblemodule.params['vmware_host'],
                      user=ansiblemodule.params['vmware_user'],
                      pwd=ansiblemodule.params['vmware_pwd'],
                      port=int(ansiblemodule.params['vmware_port']),
                      sslContext=context)

    if not si:
        ansiblemodule.fail_json(
            msg=
            "Could not connect to the specified host using specified username and password"
        )

    atexit.register(Disconnect, si)

    content = si.RetrieveContent()

    # now we get a reference to the VM we want to delete
    current_vm = get_obj(content, [vim.VirtualMachine],
                         ansiblemodule.params['vmware_dest_vm'])

    if current_vm:
        # first we stop it if still running
        if format(current_vm.runtime.powerState) == "poweredOn":
            task = current_vm.PowerOffVM_Task()
            result = wait_for_task(task)
            if result['task_done'] is not True:
                ansiblemodule.fail_json(msg="Something failed stopping the VM")

        # now we delete it
        task = current_vm.Destroy_Task()
        result = wait_for_task(task)
        if result['task_done']:
            ansiblemodule.exit_json(changed=True,
                                    msg="The current VM has been deleted")
        else:
            ansiblemodule.fail_json(msg="Something failed deleting the VM")

    else:
        ansiblemodule.exit_json(changed=False,
                                msg="Destination VM not found, skipping...")

    Disconnect(si)
Example #19
    def close(self):
        """Close connection."""
        super(Connection, self).close()

        Disconnect(self._si)
        self._connected = False
Example #20
 def disconnect(self):
     """Disconnect from the vSphere vCenter."""
     if self.si is not None:
         Disconnect(self.si)
         self.si = None
         self.content = None
Example #21
def collect_esxi_data(host, user, pwd, ssl, es):

    #now = datetime.datetime.now()
    global dict_list

    global printVM
    global printDatastore
    global printHost

    try:
        collection_time = datetime.now(pytz.utc).replace(microsecond=0)

        today = time.strftime("%Y-%m-%d %H:%M")
        si = SmartConnect(host=host, user=user, pwd=pwd, sslContext=ssl)
        print('Collecting Information at :', today)

        content = si.RetrieveContent()

        #disconnect the connection when program exits
        #atexit.register(Disconnect, si)
        #atexit.register(endit)

        # Get vCenter date and time for use as baseline when querying for counters
        vchtime = si.CurrentTime()

        perf_dict = create_perf_dictionary(content)

        container = content.rootFolder  # starting point to look into
        viewType = [vim.VirtualMachine]  # object types to look for
        recursive = True  # whether we should look into it recursively

        containerView = content.viewManager.CreateContainerView(
            container, viewType, recursive)

        children = containerView.view
        c1 = 0
        for child in children:
            print_vm_info(child)
            c1 += 1

        c2 = 0
        if printVM:
            vmProps = get_properties(content, [vim.VirtualMachine],
                                     ['name', 'runtime.powerState'],
                                     vim.VirtualMachine)
            for vm in vmProps:
                if vm['runtime.powerState'] == "poweredOn":

                    #print("VM Name in Power ON : ",vm["name"])

                    vm_moref = vm['moref']
                    guest, cpu, mem = vm_core(vm_moref)  #core information
                    #vmGuest1.append(guest)
                    vmNumCPU.append(cpu)

                    s = re.findall(r"[-+]?\d*\.\d+|\d+", mem)

                    if "GB" in mem:
                        memKBytes = float(s[0]) * 131072
                    elif "MB" in mem:
                        memKBytes = float(s[0]) * 1024
                    elif "KB" in mem:
                        memKBytes = float(s[0])
                    else:
                        memKBytes = float(s[0]) * 0.00097656

                    vmMemory.append(memKBytes)

                    status, state = vm_status(vm_moref)  #status information
                    #print("State : ", state)
                    vmStatus.append(status)
                    #print("State in P on:", state)
                    vmState.append(1)

                    CPUready = vm_cpu_ready(vm_moref, content, vchtime,
                                            perf_dict)
                    vmCPUready.append(CPUready)

                    CPUusage = vm_cpu_usage(vm_moref, content, vchtime,
                                            perf_dict)
                    vmCPUusage.append(CPUusage)

                    MEMactive = vm_mem_active(vm_moref, content, vchtime,
                                              perf_dict)
                    vmMEMactive.append(MEMactive)

                    MEMshared = vm_mem_shared(vm_moref, content, vchtime,
                                              perf_dict)
                    vmMEMshared.append(MEMshared)

                    MEMballoon = vm_mem_balloon(vm_moref, content, vchtime,
                                                perf_dict)
                    vmMEMballoon.append(MEMballoon)

                    DS_readIO, DS_writeIO, DS_finalIO = vm_ds_io(
                        vm_moref, content, vchtime, perf_dict)

                    vmDS_readIO.append(DS_readIO)
                    vmDS_writeIO.append(DS_writeIO)
                    vmDS_finalIO.append(DS_finalIO)

                    DS_readLatency, DS_writeLatency, totalLatency = vm_ds_latency(
                        vm_moref, content, vchtime, perf_dict)
                    vmDS_readLatency.append(DS_readLatency)
                    vmDS_writeLatency.append(DS_writeLatency)
                    vmDS_totalLatency.append(totalLatency)

                    NetUsageRx, NetUsageTx, NetUsageTotal = vm_net_usage(
                        vm_moref, content, vchtime, perf_dict)
                    vm_NetUsageRx.append(NetUsageRx)
                    vm_NetUsageTx.append(NetUsageTx)
                    vm_NetUsageTotal.append(NetUsageTotal)
                    #break
                    c2 += 1

                else:

                    #print("VM Name in Power OFF : ",vm["name"])

                    vm_moref = vm['moref']
                    guest, cpu, mem = vm_core(vm_moref)  #core information
                    #vmGuest1.append(guest)
                    vmNumCPU.append(cpu)

                    s = re.findall(r"[-+]?\d*\.\d+|\d+", mem)

                    if "GB" in mem:
                        memKBytes = float(s[0]) * 131072
                    elif "MB" in mem:
                        memKBytes = float(s[0]) * 1024
                    elif "KB" in mem:
                        memKBytes = float(s[0])
                    else:
                        memKBytes = float(s[0]) * 0.00097656

                    vmMemory.append(memKBytes)

                    status, state = vm_status(vm_moref)  #status information
                    vmStatus.append(status)
                    #print("State in P off:", state)
                    vmState.append(0)

                    #CPUready = vm_cpu_ready(vm_moref, content, vchtime, perf_dict)
                    vmCPUready.append(0)

                    #CPUusage = vm_cpu_usage(vm_moref, content, vchtime, perf_dict)
                    vmCPUusage.append(0)

                    #MEMactive = vm_mem_active(vm_moref, content, vchtime, perf_dict)
                    vmMEMactive.append(0)

                    #MEMshared = vm_mem_shared(vm_moref, content, vchtime, perf_dict)
                    vmMEMshared.append(0)

                    #MEMballoon = vm_mem_balloon(vm_moref, content, vchtime, perf_dict)
                    vmMEMballoon.append(0)

                    #DS_readIO , DS_writeIO, DS_finalIO = vm_ds_io(vm_moref, content, vchtime, perf_dict)

                    vmDS_readIO.append(0)
                    vmDS_writeIO.append(0)
                    vmDS_finalIO.append(0)

                    #DS_readLatency,DS_writeLatency, totalLatency = vm_ds_latency(vm_moref, content, vchtime, perf_dict)
                    vmDS_readLatency.append(0)
                    vmDS_writeLatency.append(0)
                    vmDS_totalLatency.append(0)

                    #NetUsageRx,NetUsageTx, NetUsageTotal = vm_net_usage(vm_moref, content, vchtime, perf_dict)
                    vm_NetUsageRx.append(0)
                    vm_NetUsageTx.append(0)
                    vm_NetUsageTotal.append(0)

            d = {}
            #print("VM state list" , vmState)
            header = [
                "@Timestamp", "vmName", "vmTemplate", "vmPath", "vmDSlocation",
                "vmGuest", "vmInstanceUUID", "vmBioUUID", "vmIP",
                "VMwareTools", "vmNumCPU", "vmMemory", "vmStatus", "vmState",
                "vmCPUready", "vmCPUusage", "vmMEMactive", "vmMEMshared",
                "vmMEMballoon", "vmDS_readIO", "vmDS_writeIO", "vmDS_finalIO",
                "vmDS_readLatency", "vmDS_writeLatency", "vmDS_totalLatency",
                "vm_NetUsageRx", "vm_NetUsageTx", "vm_NetUsageTotal"
            ]

            #for i in range(0,len(vmState)):
            #    if vmState[i] == "poweredOn" :
            #        vmState[i] = 1
            #    else :
            #        vmState[i] = 0

            for i in range(0, len(vmName)):

                VMLogger.info(
                    "{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}"
                    .format(collection_time, vmName[i], vmTemplate[i],
                            vmPath[i], vmDSlocation[i], vmGuest[i],
                            vmInstanceUUID[i], vmBioUUID[i], vmIP[i],
                            VMwareTools[i], vmNumCPU[i], vmMemory[i],
                            vmStatus[i], vmState[i], vmCPUready[i],
                            vmCPUusage[i], vmMEMactive[i], vmMEMshared[i],
                            vmMEMballoon[i], vmDS_readIO[i], vmDS_writeIO[i],
                            vmDS_finalIO[i], vmDS_readLatency[i],
                            vmDS_writeLatency[i], vmDS_totalLatency[i],
                            vm_NetUsageRx[i], vm_NetUsageTx[i],
                            vm_NetUsageTotal[i]))

                d["@Timestamp"] = collection_time
                d["vmName"] = vmName[i]
                d["vmTemplate"] = vmTemplate[i]
                d["vmGuest"] = vmGuest[i]
                d["vmInstanceUUID"] = vmInstanceUUID[i]
                d["vmBioUUID"] = vmBioUUID[i]
                d["vmIP"] = vmIP[i]
                d["VMwareTools"] = VMwareTools[i]
                #d["vmGuest1"] = vmGuest1
                d["vmNumCPU"] = vmNumCPU[i]
                d["vmMemory"] = vmMemory[i]
                d["vmStatus"] = vmStatus[i]
                d["vmState"] = vmState[i]
                d["vmCPUready"] = vmCPUready[i]
                d["vmCPUusage"] = vmCPUusage[i]
                d["vmMEMactive"] = vmMEMactive[i]
                d["vmMEMshared"] = vmMEMshared[i]
                d["vmMEMballoon"] = vmMEMballoon[i]
                d["vmDS_readIO"] = vmDS_readIO[i]
                d["vmDS_writeIO"] = vmDS_writeIO[i]
                d["vmDS_finalIO"] = vmDS_finalIO[i]
                d["vmDS_readLatency"] = vmDS_readLatency[i]
                d["vmDS_writeLatency"] = vmDS_writeLatency[i]
                d["vmDS_totalLatency"] = vmDS_totalLatency[i]
                d["vm_NetUsageRx"] = vm_NetUsageRx[i]
                d["vm_NetUsageTx"] = vm_NetUsageTx[i]
                d["vm_NetUsageTotal"] = vm_NetUsageTotal[i]

                es.index(index="vm-index", doc_type="log", body=d)

            #dict_list.append(d)

        if printHost:

            hostProps = get_properties(content, [vim.HostSystem], ['name'],
                                       vim.HostSystem)
            hostNameLIST, hostModelLIST, hostCPULIST, hostCPUcoresLIST, hostnumThreadsLIST, hostMEMsizeLIST, hostCPUusageLIST, hostMEMusageLIST = (
                [] for i in range(8))

            for host in hostProps:
                #print("For Host :")
                #print("---------------------------------------------------------")
                host_moref = host['moref']
                hostName, hostModel, hostCPU, hostCPUcores, hostnumThreads, hostMEMsize = host_core(
                    host_moref)

                hostNameLIST.append(hostName)
                hostModelLIST.append(hostModel)
                hostCPULIST.append(hostCPU)
                hostCPUcoresLIST.append(hostCPUcores)
                hostnumThreadsLIST.append(hostnumThreads)
                hostMEMsizeLIST.append(hostMEMsize)

                hostCPUusage = host_cpu_usage(host_moref)
                hostCPUusageLIST.append(hostCPUusage)
                hostMEMusage = host_mem_usage(host_moref)
                hostMEMusageLIST.append(hostMEMusage)

            #print("Connected to VMware vCenter Server !")

            #print("hostNameLIST \n" , hostNameLIST)
            #print("hostModelLIST \n" , hostModelLIST)
            #print("hostCPULIST \n" , hostCPULIST)
            #print("hostnumThreadsLIST \n", hostnumThreadsLIST)
            #print("hostMEMsizeLIST \n" , hostMEMsizeLIST)
            #print("hostCPUusageLIST",hostCPUusageLIST)
            #print("hostMEMusageLIST\n",hostMEMusageLIST)

            printHost = False
            dh = {}
            for i in range(0, len(hostNameLIST)):
                dh["hostname"] = hostNameLIST[i]
                dh["host_model"] = hostModelLIST[i]
                dh["host_CPU"] = hostCPULIST[i]
                dh["host_CPU_core"] = hostCPUcoresLIST[i]
                dh["host_CPU_thread"] = hostnumThreadsLIST[i]
                dh["host_mem_size"] = hostMEMsizeLIST[i]
                dh["host_MEM_usage"] = hostMEMusageLIST[i]
                dh["host_CPU_usage"] = hostCPUusageLIST[i]

                HOSTlogger.info("{},{},{},{},{},{},{},{},{}".format(
                    collection_time, hostNameLIST[i], hostModelLIST[i],
                    hostCPULIST[i], hostCPUcoresLIST[i], hostnumThreadsLIST[i],
                    hostMEMsizeLIST[i], hostMEMusageLIST[i],
                    hostCPUusageLIST[i]))

            es.index(index="host-index", doc_type="log", body=dh)

        if printDatastore:
            dsProps = get_properties(content, [vim.Datastore], ['name'],
                                     vim.Datastore)

            dd = {}

            for datastore in dsProps:
                ds_moref = datastore['moref']
                ds_status(ds_moref)
                datastore_name = datastore["name"]
                datastore_capacity, datastore_free, datastore_used_pct, datastore_uncommited_space = ds_space(
                    ds_moref)
                datastore_name_lst.append(datastore_name)
                datastore_capacity_lst.append(datastore_capacity)
                datastore_free_lst.append(datastore_free)
                datastore_used_pct_lst.append(datastore_used_pct)
                datastore_uncommited_space_lst.append(
                    datastore_uncommited_space)

            for i in range(0, len(datastore_name_lst)):
                dd["ds_name"] = datastore_name_lst[i]
                dd["ds_capacity"] = datastore_capacity_lst[i]
                dd["ds_free"] = datastore_free_lst[i]
                dd["ds_used_pct"] = datastore_used_pct_lst[i]
                dd["ds_uncommitted_space"] = datastore_uncommited_space_lst[i]
                DSlogger.info("{},{},{},{},{},{}".format(
                    collection_time, datastore_name_lst[i],
                    datastore_capacity_lst[i], datastore_free_lst[i],
                    datastore_used_pct_lst[i],
                    datastore_uncommited_space_lst[i]))

            es.index(index="datastore-index", doc_type="log", body=dd)

        if os.path.exists("/home/akhilesh/Desktop/vcenter_perfdic.txt"):
            os.remove('/home/akhilesh/Desktop/vcenter_perfdic.txt')

        if os.path.exists('/home/akhilesh/Desktop/host_perfdic.txt'):
            os.remove('/home/akhilesh/Desktop/host_perfdic.txt')

        Disconnect(si)
        endit()

    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
Example #22
 def __exit__(self, type, value, traceback):
     Disconnect(self._si)
Example #23
         hostName=vim.vm.customization.FixedName(name=new_vm_name))
     customspec = vim.vm.customization.Specification(
         nicSettingMap=[adaptermap],
         globalIPSettings=globalip,
         identity=ident)
     resource_pool = vmutils.get_resource_pool(si, pool_resource)
     relocateSpec = vim.vm.RelocateSpec(pool=resource_pool)
     cloneSpec = vim.vm.CloneSpec(powerOn=True,
                                  template=False,
                                  location=relocateSpec,
                                  customization=None,
                                  config=vmconf)
     clone = template_vm.Clone(name=new_vm_name,
                               folder=template_vm.parent,
                               spec=cloneSpec)
     Disconnect(si)
     line_counter += 1
 else:
     vcenter = i[0]
     username = i[1]
     password = i[2]
     template_name = i[3]
     new_vm_name = i[4]
     pool_resource = i[5]
     cpu_number = i[6]
     memory_on_mb = i[7]
     si = None
     try:
         si = SmartConnect(host=vcenter,
                           user=username,
                           pwd=password,
Example #24
 def __exit__(self, type, value, traceback):
     _ = Disconnect(self.conn)
Example #25
def collect_esxi_data(host, user, pwd, ssl):

    now = datetime.datetime.now()

    try:
        si = SmartConnect(host=host, user=user, pwd=pwd, sslContext=ssl)
        print('Collecting Information at : ', str(now))

        content = si.RetrieveContent()

        #disconnect the connection when program exits
        #atexit.register(Disconnect, si)
        #atexit.register(endit)

        # Get vCenter date and time for use as baseline when querying for counters
        vchtime = si.CurrentTime()

        perf_dict = create_perf_dictionary(content)

        container = content.rootFolder  # starting point to look into
        viewType = [vim.VirtualMachine]  # object types to look for
        recursive = True  # whether we should look into it recursively

        containerView = content.viewManager.CreateContainerView(
            container, viewType, recursive)

        children = containerView.view
        c1 = 0
        for child in children:
            print_vm_info(child)
            c1 += 1

        c2 = 0
        if printVM:
            vmProps = get_properties(content, [vim.VirtualMachine],
                                     ['name', 'runtime.powerState'],
                                     vim.VirtualMachine)
            for vm in vmProps:
                if vm['runtime.powerState'] == "poweredOn":

                    #print("VM Name : ",vm["name"])

                    vm_moref = vm['moref']
                    guest, cpu, mem = vm_core(vm_moref)  #core information
                    vmGuest1.append(guest)
                    vmNumCPU.append(cpu)

                    s = re.findall(r"[-+]?\d*\.\d+|\d+", mem)

                    if "GB" in mem:
                        memKBytes = float(s[0]) * 131072
                    elif "MB" in mem:
                        memKBytes = float(s[0]) * 1024
                    elif "KB" in mem:
                        memKBytes = float(s[0])
                    else:
                        memKBytes = float(s[0]) * 0.00097656

                    vmMemory.append(memKBytes)

                    status, state = vm_status(vm_moref)  #status information
                    vmStatus.append(status)
                    vmState.append(state)

                    CPUready = vm_cpu_ready(vm_moref, content, vchtime,
                                            perf_dict)
                    vmCPUready.append(CPUready)

                    CPUusage = vm_cpu_usage(vm_moref, content, vchtime,
                                            perf_dict)
                    vmCPUusage.append(CPUusage)

                    MEMactive = vm_mem_active(vm_moref, content, vchtime,
                                              perf_dict)
                    vmMEMactive.append(MEMactive)

                    MEMshared = vm_mem_shared(vm_moref, content, vchtime,
                                              perf_dict)
                    vmMEMshared.append(MEMshared)

                    MEMballoon = vm_mem_balloon(vm_moref, content, vchtime,
                                                perf_dict)
                    vmMEMballoon.append(MEMballoon)

                    DS_readIO, DS_writeIO, DS_finalIO = vm_ds_io(
                        vm_moref, content, vchtime, perf_dict)

                    vmDS_readIO.append(DS_readIO)
                    vmDS_writeIO.append(DS_writeIO)
                    vmDS_finalIO.append(DS_finalIO)

                    DS_readLatency, DS_writeLatency, totalLatency = vm_ds_latency(
                        vm_moref, content, vchtime, perf_dict)
                    vmDS_readLatency.append(DS_readLatency)
                    vmDS_writeLatency.append(DS_writeLatency)
                    vmDS_totalLatency.append(totalLatency)

                    NetUsageRx, NetUsageTx, NetUsageTotal = vm_net_usage(
                        vm_moref, content, vchtime, perf_dict)
                    vm_NetUsageRx.append(NetUsageRx)
                    vm_NetUsageTx.append(NetUsageTx)
                    vm_NetUsageTotal.append(NetUsageTotal)
                    #break
                    c2 += 1

        for i in range(0, c2):
            VMLogger.info(
                "{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}"
                .format(collection_time, vmName[i], vmTemplate[i], vmPath[i],
                        vmDSlocation[i], vmGuest[i], vmInstanceUUID[i],
                        vmBioUUID[i], vmIP[i], VMwareTools[i], vmGuest1[i],
                        vmNumCPU[i], vmMemory[i], vmStatus[i], vmState[i],
                        vmCPUready[i], vmCPUusage[i], vmMEMactive[i],
                        vmMEMshared[i], vmMEMballoon[i], vmDS_readIO[i],
                        vmDS_writeIO[i], vmDS_finalIO[i], vmDS_readLatency[i],
                        vmDS_writeLatency[i], vmDS_totalLatency[i],
                        vm_NetUsageRx[i], vm_NetUsageTx[i],
                        vm_NetUsageTotal[i]))

        with open('vmlog.log') as f:
            reader = csv.DictReader(f)
            helpers.bulk(es, reader, index='vm-index', doc_type='log')

        Disconnect(si)
        endit()

    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
Example No. 26
def ext_pillar(minion_id, pillar, **kwargs):  # pylint: disable=W0613
    """
    Check vmware/vcenter for all data
    """
    vmware_pillar = {}
    host = None
    username = None
    password = None
    property_types = []
    property_name = "name"
    protocol = None
    port = None
    pillar_key = "vmware"
    replace_default_attributes = False
    type_specific_pillar_attributes = {
        "VirtualMachine": [
            {
                "config": [
                    "version",
                    "guestId",
                    "files",
                    "tools",
                    "flags",
                    "memoryHotAddEnabled",
                    "cpuHotAddEnabled",
                    "cpuHotRemoveEnabled",
                    "datastoreUrl",
                    "swapPlacement",
                    "bootOptions",
                    "scheduledHardwareUpgradeInfo",
                    "memoryAllocation",
                    "cpuAllocation",
                ]
            },
            {
                "summary": [
                    {
                        "runtime": [{
                            "host": ["name", {
                                "parent": "name"
                            }]
                        }, "bootTime"]
                    },
                    {
                        "guest": [
                            "toolsStatus",
                            "toolsVersionStatus",
                            "toolsVersionStatus2",
                            "toolsRunningStatus",
                        ]
                    },
                    {
                        "config": ["cpuReservation", "memoryReservation"]
                    },
                    {
                        "storage": ["committed", "uncommitted", "unshared"]
                    },
                    {
                        "dasVmProtection": ["dasProtected"]
                    },
                ]
            },
            {
                "storage": [{
                    "perDatastoreUsage": [
                        {
                            "datastore": "name"
                        },
                        "committed",
                        "uncommitted",
                        "unshared",
                    ]
                }]
            },
        ],
        "HostSystem": [
            {
                "datastore": [
                    "name",
                    "overallStatus",
                    {
                        "summary": [
                            "url",
                            "freeSpace",
                            "maxFileSize",
                            "maxVirtualDiskCapacity",
                            "maxPhysicalRDMFileSize",
                            "maxVirtualRDMFileSize",
                            {
                                "vmfs": [
                                    "capacity",
                                    "blockSizeMb",
                                    "maxBlocks",
                                    "majorVersion",
                                    "version",
                                    "uuid",
                                    {
                                        "extent": ["diskName", "partition"]
                                    },
                                    "vmfsUpgradeable",
                                    "ssd",
                                    "local",
                                ],
                            },
                        ],
                    },
                    {
                        "vm": "name"
                    },
                ]
            },
            {
                "vm": [
                    "name",
                    "overallStatus",
                    {
                        "summary": [{
                            "runtime": "powerState"
                        }]
                    },
                ]
            },
        ],
    }
    pillar_attributes = [
        {
            "summary": ["overallStatus"]
        },
        {
            "network":
            ["name", {
                "config": {
                    "distributedVirtualSwitch": "name"
                }
            }]
        },
        {
            "datastore": ["name"]
        },
        {
            "parent": ["name"]
        },
    ]

    if "pillar_key" in kwargs:
        pillar_key = kwargs["pillar_key"]
    vmware_pillar[pillar_key] = {}

    if "host" not in kwargs:
        log.error(
            "VMWare external pillar configured but host is not specified in ext_pillar configuration."
        )
        return vmware_pillar
    else:
        host = kwargs["host"]
        log.debug("vmware_pillar -- host = %s", host)

    if "username" not in kwargs:
        log.error(
            "VMWare external pillar requested but username is not specified in ext_pillar configuration."
        )
        return vmware_pillar
    else:
        username = kwargs["username"]
        log.debug("vmware_pillar -- username = %s", username)

    if "password" not in kwargs:
        log.error(
            "VMWare external pillar requested but password is not specified in ext_pillar configuration."
        )
        return vmware_pillar
    else:
        password = kwargs["password"]
        log.debug("vmware_pillar -- password = %s", password)

    if "replace_default_attributes" in kwargs:
        replace_default_attributes = kwargs["replace_default_attributes"]
        if replace_default_attributes:
            pillar_attributes = []
            type_specific_pillar_attributes = {}

    if "property_types" in kwargs:
        for prop_type in kwargs["property_types"]:
            if isinstance(prop_type, dict):
                next_prop_type_key = next(iter(prop_type))
                property_types.append(getattr(vim, next_prop_type_key))
                if isinstance(prop_type[next_prop_type_key], list):
                    pillar_attributes = (pillar_attributes +
                                         prop_type[next_prop_type_key])
                else:
                    log.warning(
                        "A property_type dict was specified, but its value is not a list"
                    )
            else:
                property_types.append(getattr(vim, prop_type))
    else:
        property_types = [vim.VirtualMachine]
    log.debug("vmware_pillar -- property_types = %s", property_types)

    if "property_name" in kwargs:
        property_name = kwargs["property_name"]
    else:
        property_name = "name"
    log.debug("vmware_pillar -- property_name = %s", property_name)

    if "protocol" in kwargs:
        protocol = kwargs["protocol"]
        log.debug("vmware_pillar -- protocol = %s", protocol)

    if "port" in kwargs:
        port = kwargs["port"]
        log.debug("vmware_pillar -- port = %s", port)

    virtualgrain = None
    osgrain = None
    if "virtual" in __grains__:
        virtualgrain = __grains__["virtual"].lower()
    if "os" in __grains__:
        osgrain = __grains__["os"].lower()

    if virtualgrain == "vmware" or osgrain == "vmware esxi" or osgrain == "esxi":
        vmware_pillar[pillar_key] = {}
        try:
            _conn = salt.utils.vmware.get_service_instance(
                host,
                username,
                password,
                protocol,
                port,
                verify_ssl=kwargs.get("verify_ssl", True),
            )
            if _conn:
                data = None
                for prop_type in property_types:
                    data = salt.utils.vmware.get_mor_by_property(
                        _conn,
                        prop_type,
                        minion_id,
                        property_name=property_name)
                    if data:
                        type_name = type(data).__name__.replace("vim.", "")
                        if hasattr(data, "availableField"):
                            vmware_pillar[pillar_key]["annotations"] = {}
                            for availableField in data.availableField:
                                for customValue in data.customValue:
                                    if availableField.key == customValue.key:
                                        vmware_pillar[pillar_key][
                                            "annotations"][
                                                availableField.
                                                name] = customValue.value
                        type_specific_pillar_attribute = []
                        if type_name in type_specific_pillar_attributes:
                            type_specific_pillar_attribute = type_specific_pillar_attributes[
                                type_name]
                        vmware_pillar[pillar_key] = dictupdate.update(
                            vmware_pillar[pillar_key],
                            _crawl_attribute(
                                data, pillar_attributes +
                                type_specific_pillar_attribute),
                        )
                        break
                # explicitly disconnect from vCenter when we are done, connections linger idle otherwise
                Disconnect(_conn)
            else:
                log.error(
                    "Unable to obtain a connection with %s, please verify "
                    "your vmware ext_pillar configuration",
                    host,
                )
        except RuntimeError:
            log.error(
                "A runtime error occurred in the vmware_pillar, "
                "this is likely caused by an infinite recursion in "
                "a requested attribute.  Verify your requested attributes "
                "and reconfigure the pillar.")

        return vmware_pillar
    else:
        return {}
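Note that Disconnect(_conn) above is reached only when the property loop completes; if _crawl_attribute raises, the vCenter session lingers until it times out server-side. A minimal sketch of the same cleanup hardened with try/finally (the crawl body is elided):

_conn = salt.utils.vmware.get_service_instance(
    host, username, password, protocol, port)
try:
    pass  # ... crawl the requested attributes ...
finally:
    if _conn:
        # always release the session, even when the crawl fails
        Disconnect(_conn)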
Example No. 27
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import vmutils
import ssl

# TLS context that skips certificate verification (for self-signed lab hosts)
s = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
s.verify_mode = ssl.CERT_NONE
c = None
try:
    c = SmartConnect(host="183.82.41.58", user="******", pwd='Nexii@123')
    print('Valid certificate')
except Exception:
    # fall back to the unverified context when the certificate is rejected
    c = SmartConnect(host="183.82.41.58",
                     user="******",
                     pwd='Nexii@123',
                     sslContext=s)
    print("successfully connected")

# Finding source VM (the connection object here is c, not si)
template_vm = vmutils.get_vm_by_name(c, 'centos-6.5-x64')

Disconnect(c)
print("successfully disconnected")
Example No. 28
def main():
    """
    function runs all of the other functions. Some parts of this function
    are taken from the getallvms.py script from the pyvmomi gihub repo
    """
    args = get_args()
    update_date_in_past(args.days)
    try:
        si = None
        try:
            si = SmartConnect(host=args.host,
                              user=args.user,
                              pwd=args.password,
                              port=int(args.port))
        except IOError:
            pass

        if not si:
            print("Could not connect to the specified host using " \
                  "specified username and password")
            return -1

        atexit.register(Disconnect, si)

        content = si.RetrieveContent()

        datacenters = content.rootFolder.childEntity
        target_datacenter = None
        for dc in datacenters:
            if dc.name == args.datacenter:
                target_datacenter = dc
                break
        if target_datacenter is None:
            print("Couldn't find a datacenter named '%s'" % args.datacenter)
            return -1

        datastores = target_datacenter.datastore
        target_datastore = None
        for ds in datastores:
            if ds.summary.name == args.datastore:
                target_datastore = ds
                break
        if target_datastore is None:
            print("Couldn't find a datastore named '%s'" % args.datastore)
            return -1

        vmfolder = target_datacenter.vmFolder
        vmlist = vmfolder.childEntity
        dsvmkey = []

        find_vmx(target_datastore.browser,
                 "[%s]" % target_datastore.summary.name,
                 target_datacenter.name, target_datastore.summary.name)
        examine_vmx(target_datastore.summary.name)
        updatevmx_path()

        # each VM found in the inventory is passed to the getvm_info
        # function to get its instanceUUID

        for vm in vmlist:
            getvm_info(vm)

        # each uuid in the dsvmkey list is passed to the find_match
        # function to look for a match

        print("The following virtual machine(s) do not exist in the " \
              "inventory, but exist on a datastore " \
              "(Display Name, Datastore/Folder name):")
        for match in dsvmkey:
            find_match(match)

        Disconnect(si)
    except vmodl.MethodFault as e:
        print("Caught vmodl fault : " + e.msg)
        return -1
    except Exception as e:
        print("Caught exception : " + str(e))
        return -1

    return 0
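The linear scans over childEntity above only see objects at the top level of each folder. A container-view lookup descends nested folders as well; a minimal sketch (get_obj is a hypothetical helper, not part of this script):

from pyVmomi import vim

def get_obj(content, vimtype, name):
    # return the first managed object of the given type whose name matches
    view = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)  # True = recurse into folders
    try:
        for obj in view.view:
            if obj.name == name:
                return obj
        return None
    finally:
        view.Destroy()  # container views hold server-side resources

Called as get_obj(content, [vim.Datacenter], args.datacenter), it replaces the manual loop over content.rootFolder.childEntity.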
Example No. 29
 def _disconnect(self):
     try:
         Disconnect(self._connection)
     except Exception:
         # best-effort cleanup; the session may already be gone
         pass
Example No. 30
def disConnect(si):
    # take the service instance explicitly instead of relying on a global
    Disconnect(si)
    print("successfully disconnected")