Example 1
    def generate_rp_config(self):
        rp_spec = vim.ResourceConfigSpec()
        cpu_alloc = vim.ResourceAllocationInfo()
        cpu_alloc.expandableReservation = self.cpu_expandable_reservations
        cpu_alloc.limit = self.cpu_limit
        cpu_alloc.reservation = self.cpu_reservation
        cpu_alloc_shares = vim.SharesInfo()
        if self.cpu_shares == 'custom':
            cpu_alloc_shares.shares = self.cpu_allocation_shares
        cpu_alloc_shares.level = self.cpu_shares
        cpu_alloc.shares = cpu_alloc_shares
        rp_spec.cpuAllocation = cpu_alloc

        mem_alloc = vim.ResourceAllocationInfo()
        mem_alloc.limit = self.mem_limit
        mem_alloc.expandableReservation = self.mem_expandable_reservations
        mem_alloc.reservation = self.mem_reservation
        mem_alloc_shares = vim.SharesInfo()
        if self.mem_shares == 'custom':
            mem_alloc_shares.shares = self.mem_allocation_shares
        mem_alloc_shares.level = self.mem_shares
        mem_alloc.shares = mem_alloc_shares
        rp_spec.memoryAllocation = mem_alloc

        return rp_spec
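
A minimal usage sketch for the spec built above, assuming a resolved cluster_obj and an existing pool_obj as in the other examples on this page (the pool name is illustrative): the returned vim.ResourceConfigSpec can be handed to CreateResourcePool to create a pool, or to UpdateConfig to reconfigure one.

# Usage sketch only; cluster_obj and pool_obj are assumed to be already
# resolved managed objects (compare Examples 2, 9 and 10).
rp_spec = self.generate_rp_config()
cluster_obj.resourcePool.CreateResourcePool("my-pool", rp_spec)  # new child pool
pool_obj.UpdateConfig("my-pool", rp_spec)                        # or update an existing pool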
Example 2
    def state_add_rp(self):
        changed = True

        rp_spec = vim.ResourceConfigSpec()
        cpu_alloc = vim.ResourceAllocationInfo()
        cpu_alloc.expandableReservation = self.cpu_expandable_reservations
        cpu_alloc.limit = int(self.cpu_limit)
        cpu_alloc.reservation = int(self.cpu_reservation)
        cpu_alloc_shares = vim.SharesInfo()
        cpu_alloc_shares.level = self.cpu_shares
        cpu_alloc.shares = cpu_alloc_shares
        rp_spec.cpuAllocation = cpu_alloc
        mem_alloc = vim.ResourceAllocationInfo()
        mem_alloc.limit = int(self.mem_limit)
        mem_alloc.expandableReservation = self.mem_expandable_reservations
        mem_alloc.reservation = int(self.mem_reservation)
        mem_alloc_shares = vim.SharesInfo()
        mem_alloc_shares.level = self.mem_shares
        mem_alloc.shares = mem_alloc_shares
        rp_spec.memoryAllocation = mem_alloc

        self.dc_obj = find_datacenter_by_name(
            self.content, self.datacenter)
        self.cluster_obj = find_cluster_by_name_datacenter(
            self.dc_obj, self.cluster)
        rootResourcePool = self.cluster_obj.resourcePool
        rootResourcePool.CreateResourcePool(self.resource_pool, rp_spec)

        self.module.exit_json(changed=changed)
Example 3
def ADD_Pool(content, esxi, pool_name, cpu_limit, ram_limit):

    i = 0

    while True:

        try:
            hostname = content.rootFolder.childEntity[0].hostFolder.childEntity[i].name

            if hostname == esxi:

                if Search_Pool(content, esxi, pool_name):
                    print(bcolors.WARNING + "Pool already exists" + bcolors.ENDC)
                    return

                host = content.rootFolder.childEntity[0].hostFolder.childEntity[i]

                configSpec = vim.ResourceConfigSpec()
                cpuAllocationInfo = vim.ResourceAllocationInfo()
                memAllocationInfo = vim.ResourceAllocationInfo()
                sharesInfo = vim.SharesInfo(level='normal')

                cpuAllocationInfo.reservation = int(cpu_limit / 2)
                cpuAllocationInfo.expandableReservation = False
                cpuAllocationInfo.shares = sharesInfo
                cpuAllocationInfo.limit = cpu_limit

                memAllocationInfo.reservation = int(ram_limit / 2)
                memAllocationInfo.expandableReservation = False
                memAllocationInfo.shares = sharesInfo
                memAllocationInfo.limit = ram_limit
                   
                configSpec.cpuAllocation = cpuAllocationInfo
                configSpec.memoryAllocation = memAllocationInfo

                try:
                    host.resourcePool.CreateResourcePool(pool_name, configSpec)
                    print(bcolors.OKGREEN + "Pool created successfully" + bcolors.ENDC)
                    return

                except Exception:
                    print(bcolors.FAIL + "Pool NOT created" + bcolors.ENDC)
                    return

        except Exception:
            print(bcolors.FAIL + "Host does not exist" + bcolors.ENDC)
            return

        i = i + 1
Example 4
 def fix_resource_allocation(self,
                             vm_name,
                             cpu_limit=2000,
                             memory_limit=None):
     vm_mor = self.get_vm_mor(vm_name)
     memory_limit = memory_limit if memory_limit else \
         vm_mor.config.hardware.memoryMB
     shares = vim.SharesInfo(level="normal")
     cpu_allocation = vim.ResourceAllocationInfo(limit=cpu_limit,
                                                 shares=shares)
     memory_alloc = vim.ResourceAllocationInfo(limit=memory_limit,
                                               shares=shares)
     vm_mor.ReconfigVM_Task(
         vim.vm.ConfigSpec(cpuAllocation=cpu_allocation,
                           memoryAllocation=memory_alloc)).wait()
Example 5
    def _resource_allocation(self, config):
        spec = vim.ResourceAllocationInfo()

        if config is None:
            return spec

        shares_config = {
            "normal": vim.SharesInfo.Level.normal,
            "low": vim.SharesInfo.Level.low,
            "high": vim.SharesInfo.Level.high
        }
        spec.expandableReservation = config.get("expandableReservation", False)
        spec.limit = config.get("limit", -1)
        spec.reservation = config.get("reservation", 0)
        spec.shares = vim.SharesInfo()
        shares = config.get("shares", None)

        if shares:
            level = shares.get("level")
            if level == "custom":
                spec.shares.level = vim.SharesInfo.Level.custom
                spec.shares.shares = shares.get("shares")
            else:
                spec.shares.level = shares_config.get(level)

        return spec
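
For reference, the config mapping this helper reads is implied by its .get() calls; a hypothetical input (values are illustrative only) could look like this:

# Hypothetical input for _resource_allocation; keys mirror the .get() calls above.
cpu_config = {
    "expandableReservation": True,
    "limit": -1,          # -1 means unlimited
    "reservation": 1000,  # MHz for a CPU allocation (MB for memory)
    "shares": {"level": "custom", "shares": 4000},
}
spec = self._resource_allocation(cpu_config)  # called from inside the class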
Example 6
def create_vm_config_creator(host, args):
    spec = vim.vm.ConfigSpec()  # type: ignore
    files = vim.vm.FileInfo()  # type: ignore
    files.vmPathName = "[" + host.datastore[0].name + "]" + args.get('name')
    resource_allocation_spec = vim.ResourceAllocationInfo()  # type: ignore
    resource_allocation_info = vim.ResourceAllocationInfo()  # type: ignore
    resource_allocation_spec.limit = arg_to_number(args.get('cpu-allocation'))
    resource_allocation_info.limit = arg_to_number(args.get('memory'))
    spec.name = args.get('name')
    spec.numCPUs = arg_to_number(args.get('cpu-num'))
    spec.cpuAllocation = resource_allocation_spec
    spec.memoryAllocation = resource_allocation_info
    spec.memoryMB = arg_to_number(args.get('virtual-memory'))
    spec.files = files
    if args.get('guest_id'):
        spec.guestId = args.get('guest_id')
    return spec
Example 7
 def create_rp(self, name, esx_name, parent="/"):
     if self.check_pool_existence(name, esx_name):
         raise ExistenceException("Resource pool %s already exists "
                                  "on esx %s" % (name, esx_name))
     root_pool = self._get_pool_mor(parent, esx_name)
     cpu_alloc = vim.ResourceAllocationInfo(
         shares=vim.SharesInfo(level='normal'),
         limit=-1,
         expandableReservation=True,
         reservation=0)
     memory_alloc = vim.ResourceAllocationInfo(
         shares=vim.SharesInfo(level='normal'),
         limit=-1,
         expandableReservation=True,
         reservation=0)
     pool_spec = vim.ResourceConfigSpec(cpuAllocation=cpu_alloc,
                                        memoryAllocation=memory_alloc)
     try:
         root_pool.CreateResourcePool(name=name, spec=pool_spec)
     except vim.fault.DuplicateName as e:
         raise ExistenceException(e.msg)
Example 8
def TestVQATCreateSpec(vmname, memSize):
    """
    Create a spec for a VQAT VM, with all memory reserved.
    """
    Log("Creating a spec")
    cfg = vm.CreateQuickDummySpec(vmname,
                                  vmxVersion="vmx-18",
                                  memory=memSize,
                                  guest="otherGuest")

    memoryAlloc = vim.ResourceAllocationInfo()
    memoryAlloc.SetReservation(memSize)
    cfg.SetMemoryAllocation(memoryAlloc)
    return cfg
Example 9
    def make_resourcepool(self, cluster, resourcepool_name):
        """
        Create a new Resource Pool on a cluster.

        Arguments:
        :param cluster: the cluster to use (see `get_cluster`)
        :param resourcepool_name: the name for the new resource pool
        """
        rp_spec = vim.ResourceConfigSpec()
        rp_spec.cpuAllocation = vim.ResourceAllocationInfo()
        rp_spec.cpuAllocation.limit = -1  # No limit
        rp_spec.cpuAllocation.expandableReservation = True
        rp_spec.cpuAllocation.reservation = 1000  # MHz
        rp_spec.cpuAllocation.shares = vim.SharesInfo()
        rp_spec.cpuAllocation.shares.level = vim.SharesInfo.Level.normal
        rp_spec.memoryAllocation = vim.ResourceAllocationInfo()
        rp_spec.memoryAllocation.limit = -1  # No limit
        rp_spec.memoryAllocation.expandableReservation = True
        rp_spec.memoryAllocation.reservation = 256  # MiB
        rp_spec.memoryAllocation.shares = vim.SharesInfo()
        rp_spec.memoryAllocation.shares.level = vim.SharesInfo.Level.normal
        cluster.resourcePool.CreateResourcePool(
            name=resourcepool_name, spec=rp_spec)
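
Per the docstring, the cluster argument comes from a separate get_cluster helper; an illustrative call, with the wrapper object and names assumed, might be:

# Illustrative only; the api wrapper, get_cluster() and the names are assumptions.
cluster = api.get_cluster("Cluster-01")
api.make_resourcepool(cluster, "dev-pool")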
Example 10
def resourcepool_update(service_instance, parent, name,
                        cpuexpandableReservation, cpulimit, cpureservation,
                        cpushares, cpulevel, memoryexpandableReservation,
                        memorylimit, memoryreservation, memoryshares,
                        memorylevel):

    cpuAllocation = vim.ResourceAllocationInfo()
    cpuAllocation.expandableReservation = cpuexpandableReservation
    cpuAllocation.limit = cpulimit
    cpuAllocation.reservation = int(cpureservation)
    cpuShareInfo = vim.SharesInfo()
    cpuShareInfo.shares = int(cpushares)
    cpuSharesLevel = vim.SharesLevel(cpulevel)
    cpuShareInfo.level = cpuSharesLevel
    cpuAllocation.shares = cpuShareInfo

    memoryAllocation = vim.ResourceAllocationInfo()
    memoryAllocation.expandableReservation = memoryexpandableReservation
    memoryAllocation.limit = memorylimit
    memoryAllocation.reservation = int(memoryreservation)
    memoryShareInfo = vim.SharesInfo()
    memoryShareInfo.shares = int(memoryshares)
    memorySharesLevel = vim.SharesLevel(memorylevel)
    memoryShareInfo.level = memorySharesLevel
    memoryAllocation.shares = memoryShareInfo

    rpspec = vim.ResourceConfigSpec()
    rpspec.cpuAllocation = cpuAllocation
    rpspec.memoryAllocation = memoryAllocation

    content = service_instance.RetrieveContent()
    for respool in get_vim_objects(content, vim.ResourcePool):
        if respool.name == name:
            print("Found resourcepool " + name)
            newresp = respool.UpdateConfig(name, rpspec)
            print("Updated resourcepool " + name)
Example 11
    def cpu_shares(self, shares):
        """
        Configure CPU shares for a VM

        Args:
            shares (int): CPU shares to be configured

        Returns:
            Task
        """
        assert shares >= 0
        config_spec = vim.vm.ConfigSpec()
        shares_alloc = vim.ResourceAllocationInfo()
        shares_alloc.shares = vim.SharesInfo(level="custom", shares=shares)
        config_spec.cpuAllocation = shares_alloc
        return self.vm_obj.ReconfigVM_Task(config_spec)
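
The method hands back the ReconfigVM_Task handle without waiting on it; a small sketch of how a caller might block until the reconfigure completes, using pyVim's WaitForTask helper (the wrapper instance and share count are illustrative):

from pyVim.task import WaitForTask

task = vm_wrapper.cpu_shares(2000)  # hypothetical wrapper instance
WaitForTask(task)                   # block until the reconfigure task finishes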
Example 12
    def flex_vm_cpu(self, si, vm, cpu, core=None, mhz=None, reserv=None):
        # flex vm cpu
        # add check if hotadd is enabled
        cpu = int(cpu)
        if core is not None:
            core = int(core)
        if vm.config.cpuHotAddEnabled or vm.runtime.powerState == 'poweredOff':
            resource_allocation_spec = vim.ResourceAllocationInfo()
            if reserv is not None and mhz is not None:
                reservraw = mhz / 100 * reserv
                reserv = int(reservraw)
                resource_allocation_spec.reservation = reserv
            elif reserv is not None and mhz is None:
                print('Must specify both reservation and mhz')

            if mhz is not None:
                resource_allocation_spec.limit = mhz

            config_spec = vim.vm.ConfigSpec()
            # core count changes require the VM to be powered off
            if core is not None and vm.runtime.powerState == 'poweredOn':
                print('VM must be powered off for cpu core changes')
            elif core is not None and vm.runtime.powerState == 'poweredOff':
                config_spec.numCoresPerSocket = core

            config_spec.numCPUs = cpu
            config_spec.cpuAllocation = resource_allocation_spec
            task = vm.ReconfigVM_Task(config_spec)
            response = task
        elif not vm.config.cpuHotAddEnabled and vm.runtime.powerState == 'poweredOn':
            print(
                'CPU HotAdd is currently disabled and VM is powered on, unable to update VM'
            )
            response = 'Failed'
        else:
            print("Something's not right")
            response = 'Failed'
        return response
Example 13
 def get_vm_config(vm_config, devices, annotation=''):
     vmconf = vim.vm.ConfigSpec(deviceChange=devices)
     if vm_config['cpu']:
         vmconf.numCPUs = int(vm_config['cpu'])
     else:
         LOG.info("Number of CPUs has not been provided. "
                  "Installer will use the default of 4 CPUs")
     memory = vm_config['memory_in_mb']
     if memory:
         vmconf.memoryMB = long(memory)
     else:
         LOG.info("Amount of RAM has not been provided. "
                  "Installer will use the default of 4096 MB of RAM")
         memory = 4096
     vmconf.annotation = annotation
     if reserve_guest_memory(vm_config.get('nics')):
         vmconf.memoryReservationLockedToMax = True
         vmconf.memoryAllocation = vim.ResourceAllocationInfo(
             reservation=long(memory))
     return vmconf
Example 14
    def memory_reservation(self, reser=0):
        """
        Configure memory reservation for a VM

        Args:
            reser (int): 0 (clear reservation) or
                        non-0 (reserve all memory that is configured)

        Returns:
            Task
        """
        config_spec = vim.vm.ConfigSpec()
        mem_alloc = vim.ResourceAllocationInfo()
        if reser:
            mem_alloc.reservation = self.vm_obj.config.hardware.memoryMB
            config_spec.memoryReservationLockedToMax = True
        else:
            mem_alloc.reservation = 0
            config_spec.memoryReservationLockedToMax = False
        config_spec.memoryAllocation = mem_alloc
        return self.vm_obj.ReconfigVM_Task(config_spec)
Example 15
    def cpu_reservation(self, host_cpu_mhz=None, reser=0):
        """
        Configure CPU reservation for a VM

        Args:
            host_cpu_mhz (int): if to reser, host_cpu_mhz must have a value
            reser (int): 0 (clear reservation) or
                         non-0 (reserve all vCPUs that is configured)

        Returns:
                Task
        """
        config_spec = vim.vm.ConfigSpec()
        cpu_alloc = vim.ResourceAllocationInfo()
        if reser:
            assert host_cpu_mhz is not None
            vm_cpu = self.vm_obj.config.hardware.numCPU
            cpu_alloc.reservation = int(vm_cpu * host_cpu_mhz)
        else:
            cpu_alloc.reservation = 0
        config_spec.cpuAllocation = cpu_alloc
        return self.vm_obj.ReconfigVM_Task(config_spec)
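
As the docstring notes, host_cpu_mhz must be supplied when reserving: the reservation is numCPU * host_cpu_mhz. A hedged example call, with the per-core clock value assumed:

# Illustrative: reserve all configured vCPUs at an assumed 2600 MHz per core,
# e.g. a 2-vCPU VM would get a 5200 MHz reservation.
task = vm_wrapper.cpu_reservation(host_cpu_mhz=2600, reser=1)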
Example 16
    def turn_off_vm_memory_reservation(self, vm):
        # first check if memory reservation is >0
        try:
            if vm.config.memoryReservationLockedToMax:
                print("turn off memoryReservationLockedToMax")
                new_config = vim.VirtualMachineConfigSpec(
                    memoryReservationLockedToMax=False)
                task = vm.ReconfigVM_Task(spec=new_config)
                tasks.wait_for_tasks(self.si, [task])

            if vm.resourceConfig.memoryAllocation.reservation > 0:
                new_allocation = vim.ResourceAllocationInfo(reservation=0)
                new_config = vim.VirtualMachineConfigSpec(
                    memoryAllocation=new_allocation)
                task = vm.ReconfigVM_Task(spec=new_config)
                tasks.wait_for_tasks(self.si, [task])
            else:
                print('resource reservation already at 0')

            self._power_on_vm_if_off(vm)
        except Exception as e:
            print('unable to turn off reservation due to error: %s' % e)
Example 17
 def flex_vm_memory(self, si, vm, mb, reserv=None):
     #flex vm memory
     #check if hotadd is enabled
     mb = int(mb)
     logger.debug('HotAdd set to : ' + str(vm.config.memoryHotAddEnabled))
     #create the config spec
     config_spec = vim.vm.ConfigSpec()
     if vm.config.memoryHotAddEnabled == True or vm.runtime.powerState == 'poweredOff':
         if reserv is not None:
             reserv_pct = int(reserv)
             reserv = int(mb / 100 * reserv_pct)
             logger.debug('Reservation percentage requested : ' +
                          str(reserv_pct) +
                          ' Actual reservation to be set is : ' +
                          str(reserv))
             resource_allocation_spec = vim.ResourceAllocationInfo()
             resource_allocation_spec.limit = mb
             resource_allocation_spec.reservation = reserv
             config_spec.memoryAllocation = resource_allocation_spec
         #Set memory
         config_spec.memoryMB = mb
         try:
             response = vm.ReconfigVM_Task(config_spec)
         except Exception:
             logger.exception('Exception reconfiguring VM for memory flex')
             response = 'Failed'
     elif vm.config.memoryHotAddEnabled == False and vm.runtime.powerState == 'poweredOn':
         logger.warning(
             'Memory HotAdd is currently disabled and VM is powered on, unable to update VM'
         )
         exit()
     else:
         logger.warning(
             'Hot add state not found, VM object may be incorrect')
         response = 'Failed'
     return response
Example 18
def resourcepool_create(service_instance, parent, name,
                        cpuexpandableReservation, cpulimit, cpureservation,
                        cpushares, cpulevel, memoryexpandableReservation,
                        memorylimit, memoryreservation, memoryshares,
                        memorylevel):

    #cpuAllocation = vim.ResourceAllocationInfo()
    #cpuAllocation.expandableReservation = False
    #cpuAllocation.limit = -1
    #cpuAllocation.reservation = 1000
    #cpuShareInfo = vim.SharesInfo()
    #cpuShareInfo.shares = 1000
    #cpuSharesLevel = vim.SharesLevel('normal');
    #cpuShareInfo.level = cpuSharesLevel
    #cpuAllocation.shares = cpuShareInfo
    #print cpuAllocation

    #memoryAllocation = vim.ResourceAllocationInfo()
    #memoryAllocation.expandableReservation = False
    #memoryAllocation.limit = -1
    #memoryAllocation.reservation = 1000
    #memoryShareInfo = vim.SharesInfo()
    #memoryShareInfo.shares = 1000
    #memorySharesLevel = vim.SharesLevel('normal');
    #memoryShareInfo.level = memorySharesLevel
    #memoryAllocation.shares = memoryShareInfo
    #print memoryAllocation

    cpuAllocation = vim.ResourceAllocationInfo()
    cpuAllocation.expandableReservation = cpuexpandableReservation
    cpuAllocation.limit = cpulimit
    cpuAllocation.reservation = int(cpureservation)
    cpuShareInfo = vim.SharesInfo()
    cpuShareInfo.shares = int(cpushares)
    cpuSharesLevel = vim.SharesLevel(cpulevel)
    cpuShareInfo.level = cpuSharesLevel
    cpuAllocation.shares = cpuShareInfo
    #print cpuAllocation

    memoryAllocation = vim.ResourceAllocationInfo()
    memoryAllocation.expandableReservation = memoryexpandableReservation
    memoryAllocation.limit = memorylimit
    memoryAllocation.reservation = int(memoryreservation)
    memoryShareInfo = vim.SharesInfo()
    memoryShareInfo.shares = int(memoryshares)
    memorySharesLevel = vim.SharesLevel(memorylevel)
    memoryShareInfo.level = memorySharesLevel
    memoryAllocation.shares = memoryShareInfo
    #print memoryAllocation

    rpspec = vim.ResourceConfigSpec()
    rpspec.cpuAllocation = cpuAllocation
    rpspec.memoryAllocation = memoryAllocation

    #print rpspec
    content = service_instance.RetrieveContent()
    for respool in get_vim_objects(content, vim.ResourcePool):
        if respool.name == parent:
            print("Found parent resourcepool")
            newresp = respool.CreateResourcePool(name, rpspec)
            print("Created resourcepool " + newresp.name)
Example 19
def create(ctx, server_client, name, use_external_resource):
    esxi_host_ip = ctx.node.properties['connection_config'].get('esxi_ip')
    esxi_host_username = ctx.node.properties['connection_config'].get(
        'esxi_username')
    esxi_host_password = ctx.node.properties['connection_config'].get(
        'esxi_password')
    vcenter_ip = ctx.node.properties['connection_config'].get('host')
    vcenter_username = ctx.node.properties['connection_config'].get('username')
    vcenter_password = ctx.node.properties['connection_config'].get('password')
    vmware_client = VMWareClient(vcenter_ip, vcenter_username,
                                 vcenter_password)
    ctx.logger.debug('Connected to vCenter (%s) successfully!' %
                     str(vcenter_ip))

    resource_pool_name = ctx.node.properties['connection_config'].get(
        'resource_pool_name')
    datacenter_name = ctx.node.properties['connection_config'].get(
        'datacenter_name')
    # cluster_name = ctx.node.properties['connection_config'].get('cluster_name')
    # ctx.logger.debug('++++++++++++++++++++++++++datacenter_name:%s'%str(datacenter_name))
    # dc = server_client._get_obj_by_name(vim.Datacenter, datacenter_name).hostFolder.AddStandaloneHost(spec=host_connect_spec,addConnected=True)
    # ctx.logger.debug('++++++++++++++++++++++++++dc:%s'%str(dc))
    # if not dc:
    #     vmware_client.create_datacenter(datacenter_name)
    #     ctx.logger.debug('datacenter:%s is created'%str(datacenter_name))
    #
    # eh = server_client._get_obj_by_name(vim.HostSystem,esxi_host_ip)

    ctx.logger.debug('esxi_host_ip 55 = %s' % str(esxi_host_ip))
    existing_id = server_client._get_obj_by_name(
        vim.Datacenter,
        datacenter_name,
    )
    si = SmartConnectNoSSL(host=vcenter_ip,
                           user=vcenter_username,
                           pwd=vcenter_password,
                           port=443)
    if existing_id is not None:
        existing_id = existing_id.id
    else:
        folder = si.RetrieveContent().rootFolder
        # ctx.logger.info('folder78=%s'%str(folder))
        host_connect_spec = vim.host.ConnectSpec()
        host_connect_spec.hostName = esxi_host_ip
        host_connect_spec.userName = esxi_host_username
        host_connect_spec.password = esxi_host_password
        host_connect_spec.force = True
        host_connect_spec.sslThumbprint = get_ssl_thumbprint(esxi_host_ip)

        folder.CreateDatacenter(
            name=datacenter_name).hostFolder.AddStandaloneHost(
                spec=host_connect_spec, addConnected=True)
        #ctx.logger.debug('new_host.hostFolder 90 = %s' % str(new_host.hostFolder))
        ctx.logger.debug('Added host to vCenter successfully')

        existing_id = server_client._get_obj_by_name(vim.Datacenter,
                                                     datacenter_name,
                                                     use_cache=False)

    runtime_properties = ctx.instance.runtime_properties
    runtime_properties['vsphere_datacenter_id'] = existing_id

    existing_id = server_client._get_obj_by_name(
        vim.ResourcePool,
        resource_pool_name,
    )
    ctx.logger.info('existing_id 103= %s' % str(existing_id))
    if existing_id is not None:

        existing_id = existing_id.id
    else:
        dc = si.content.rootFolder.childEntity
        for d in dc:
            for i in d.hostFolder.childEntity:
                ctx.logger.info('dc.hostFolder name  = %s' % str(i.name))
                # create the resource pool on the specified esxi host
                if i.name == esxi_host_ip:
                    cr = d.hostFolder.childEntity[0]

                    rootResourcePool = cr.resourcePool

                    configSpec = vim.ResourceConfigSpec()
                    cpuAllocationInfo = vim.ResourceAllocationInfo()
                    memAllocationInfo = vim.ResourceAllocationInfo()
                    sharesInfo = vim.SharesInfo(level='normal')

                    cpuAllocationInfo.reservation = 0
                    cpuAllocationInfo.expandableReservation = True
                    cpuAllocationInfo.shares = sharesInfo
                    cpuAllocationInfo.limit = -1

                    memAllocationInfo.reservation = 0
                    memAllocationInfo.expandableReservation = True
                    memAllocationInfo.shares = sharesInfo
                    memAllocationInfo.limit = -1

                    configSpec.cpuAllocation = cpuAllocationInfo
                    configSpec.memoryAllocation = memAllocationInfo

                    rootResourcePool.CreateResourcePool(
                        resource_pool_name, configSpec)
                    while True:
                        existing_p_id = server_client._get_obj_by_name(
                            vim.ResourcePool,
                            resource_pool_name,
                            use_cache=False)
                        if existing_p_id:
                            ctx.logger.debug(
                                "Resource pool created successfully!")

                            existing_id = existing_p_id.id
                            break

    runtime_properties = ctx.instance.runtime_properties
    runtime_properties['vsphere_resource_pool_id'] = existing_id
Example 20
    def create_vm(self,
                  vm_name,
                  esx_name,
                  datastore,
                  iso=None,
                  resource_pool='/',
                  networks=None,
                  guestid="debian4Guest",
                  serial_port=None,
                  hw_version=None,
                  memorysize=512,
                  cpucount=1,
                  disk_space=1048576):
        self.reconnect()
        if not networks:
            networks = []
        datacenter = self._get_datacenter_mor()
        vm_folder = datacenter.vmFolder
        resource_pool = self._get_pool_mor(resource_pool, esx_name)
        compute = self._get_compute_mor(esx_name)
        host = self._get_host_mor(esx_name)
        default_devs = compute.environmentBrowser. \
            QueryConfigOption(host=host).defaultDevice
        conf_target = compute.environmentBrowser.QueryConfigTarget(host=host)
        devices = []

        if not [
                ds for ds in conf_target.datastore
                if ds.datastore.name == datastore
        ]:
            raise NotFoundException("Datastore '%s' not found" % datastore)
        vm_path = "[%s] %s" % (datastore, vm_name)

        connectable = vim.vm.device.VirtualDevice.ConnectInfo(
            startConnected=True)
        if iso:
            assert iso.startswith('[') and '] ' in iso and iso.endswith(".iso")
            ide_ctrl = [
                dev for dev in default_devs
                if isinstance(dev, vim.vm.device.VirtualIDEController)
            ][0]
            iso_ds = self._get_datastore_mor(iso.split("] ")[0][1:])
            backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(
                fileName=iso, datastore=iso_ds)
            cdrom = vim.vm.device.VirtualCdrom(backing=backing,
                                               key=3050 + randint(0, 99),
                                               connectable=connectable,
                                               controllerKey=ide_ctrl.key,
                                               unitNumber=0)
            cdrom_spec = vim.vm.device.VirtualDeviceSpec(operation="add",
                                                         device=cdrom)
            devices.append(cdrom_spec)

        if disk_space != 0:
            scsi_key = 1
            scsi_ctrl = vim.vm.device.VirtualLsiLogicController(
                busNumber=0, key=scsi_key, sharedBus="noSharing")
            scsi_spec = vim.vm.device.VirtualDeviceSpec(operation="add",
                                                        device=scsi_ctrl)
            devices.append(scsi_spec)

            backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
                diskMode="persistent", thinProvisioned=True)
            hdd = vim.vm.device.VirtualDisk(capacityInKB=disk_space,
                                            key=randint(50, 100),
                                            backing=backing,
                                            connectable=connectable,
                                            controllerKey=scsi_key,
                                            unitNumber=0)
            hdd_spec = vim.vm.device.VirtualDeviceSpec(operation="add",
                                                       fileOperation="create",
                                                       device=hdd)
            devices.append(hdd_spec)

        for net in networks:
            if not [n for n in conf_target.network if n.name == net]:
                raise NotFoundException(msg="Critical error! "
                                        "Network " + net + " does not exist")
            net_mor = self._get_from_list(host.network, "name", net)
            backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo(
                deviceName=net, network=net_mor)
            network = vim.vm.device.VirtualVmxnet3(addressType="generated",
                                                   backing=backing,
                                                   connectable=connectable,
                                                   key=randint(4005, 4999))
            network_spec = vim.vm.device.VirtualDeviceSpec(operation="add",
                                                           device=network)
            devices.append(network_spec)

        if serial_port:
            sio_ctrl = [
                dev for dev in default_devs
                if isinstance(dev, vim.vm.device.VirtualSIOController)
            ][0]
            backing = vim.vm.device.VirtualSerialPort.PipeBackingInfo(
                endpoint="server", pipeName=serial_port)
            com_port = vim.vm.device.VirtualSerialPort(
                backing=backing,
                key=9000,
                controllerKey=sio_ctrl.key,
                unitNumber=0,
                yieldOnPoll=True)
            com_spec = vim.vm.device.VirtualDeviceSpec(operation="add",
                                                       device=com_port)
            devices.append(com_spec)

        if isinstance(hw_version, int):
            vm_version = "vmx-" + (str(hw_version) if hw_version > 9 else "0" +
                                   str(hw_version))
        else:
            vm_version = "vmx-08"

        vmx_file = vim.vm.FileInfo(logDirectory=None,
                                   snapshotDirectory=None,
                                   suspendDirectory=None,
                                   vmPathName=vm_path)

        config = vim.vm.ConfigSpec(
            name=vm_name,
            version=vm_version,
            guestId=guestid,
            files=vmx_file,
            numCPUs=cpucount,
            numCoresPerSocket=cpucount,
            memoryMB=memorysize,
            deviceChange=devices,
            cpuAllocation=vim.ResourceAllocationInfo(limit=2000),
            memoryAllocation=vim.ResourceAllocationInfo(limit=memorysize),
            swapPlacement="hostLocal")

        try:
            vm_folder.CreateVM_Task(config=config, pool=resource_pool).wait()
        except Exception as e:
            logging.debug(str(vm_name))
            logging.debug(e)
            if hasattr(e, "msg"):
                logging.debug(str(e.msg))
            if hasattr(e, "message"):
                logging.debug(str(e.message))
            raise
Example 21
def main():
    module = AnsibleModule(
        argument_spec=dict(
            ovftool_path=dict(required=True, type='str'),
            vcenter_host=dict(required=True, type='str'),
            vcenter_user=dict(required=True, type='str'),
            vcenter_password=dict(required=True, type='str', no_log=True),
            ssl_verify=dict(required=False, type='bool', default=False),
            state=dict(required=False, type='str', default='present'),
            se_vmw_host=dict(required=False, type='str'),
            se_vmw_datacenter=dict(required=False, type='str'),
            se_vmw_cluster=dict(required=False, type='str'),
            se_vmw_datastore=dict(required=False, type='str'),
            se_vmw_ovf_networks=dict(required=False, type='dict'),
            se_vmw_disk_mode=dict(required=False, type='str', default='thin'),
            se_vmw_ova_path=dict(required=True, type='str'),
            se_vmw_vm_name=dict(required=True, type='str'),
            se_vmw_power_on=dict(required=False, type='bool', default=True),
            se_vmw_vcenter_folder=dict(required=False, type='str'),
            se_vmw_mgmt_ip=dict(required=False, type='str'),
            se_vmw_mgmt_mask=dict(required=False, type='str'),
            se_vmw_default_gw=dict(required=False, type='str'),
            se_vmw_sysadmin_public_key=dict(required=False, type='str'),
            se_auth_token=dict(required=True, type='str'),
            se_cluster_uuid=dict(required=True, type='str'),
            se_master_ctl_ip=dict(required=True, type='str'),
            se_vmw_number_of_cpus=dict(required=False, type='int'),
            se_vmw_cpu_reserved=dict(required=False, type='int'),
            se_vmw_memory=dict(required=False, type='int'),
            se_vmw_memory_reserved=dict(required=False, type='int'),
            se_vmw_disk_size=dict(required=False, type='int'),
            se_vmw_ovf_properties=dict(required=False, type='dict'),
        ),
        supports_check_mode=True,
    )

    try:
        si = SmartConnectNoSSL(host=module.params['vcenter_host'],
                               user=module.params['vcenter_user'],
                               pwd=module.params['vcenter_password'])
        atexit.register(Disconnect, si)
    except vim.fault.InvalidLogin:
        return module.fail_json(
            msg='exception while connecting to vCenter, login failure, '
            'check username and password')
    except requests.exceptions.ConnectionError:
        return module.fail_json(
            msg='exception while connecting to vCenter, check hostname, '
            'FQDN or IP')
    check_mode = module.check_mode
    if module.params['state'] == 'absent':
        vm = get_vm_by_name(si, module.params['se_vmw_vm_name'])

        if vm is None:
            return module.exit_json(msg='A VM with the name %s not found' %
                                    (module.params['se_vmw_vm_name']))

        if check_mode:
            return module.exit_json(msg='A VM with the name %s found' %
                                    (module.params['se_vmw_vm_name']),
                                    changed=True)

        if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            task = vm.PowerOffVM_Task()
            wait_for_tasks(si, [task])

        task = vm.Destroy_Task()
        wait_for_tasks(si, [task])

        return module.exit_json(
            msg='A VM with the name %s deleted successfully' %
            (module.params['se_vmw_vm_name']),
            changed=True)

    if module.params.get('se_vmw_datacenter', None):
        dc = get_dc(si, module.params['se_vmw_datacenter'])
    else:
        dc = si.content.rootFolder.childEntity[0]

    if module.params.get('se_vmw_cluster', None):
        cl = get_cluster(si, dc, module.params['se_vmw_cluster'])
    else:
        cl = get_first_cluster(si, dc)

    host_name = module.params.get('se_vmw_host', None)
    datastore_name = module.params.get('se_vmw_datastore', None)
    if host_name and datastore_name:
        host = get_host(cl, host_name)
        ds = get_ds(host, datastore_name, inst_type='host')
    elif host_name:
        host = get_host(cl, host_name)
        ds = get_largest_free_ds(host)
    elif datastore_name:
        ds = get_ds(cl, datastore_name, inst_type="datacenter")
    else:
        ds = get_largest_free_ds(cl)

    if is_vm_exist(si, cl, module.params['se_vmw_vm_name']):
        vm = get_vm_by_name(si, module.params['se_vmw_vm_name'])
        vm_path = compile_folder_path_for_object(vm)
        folder = get_folder_by_path(si, dc,
                                    module.params['se_vmw_vcenter_folder'])
        folder_path = compile_folder_path_for_object(folder)
        changed = False
        if vm_path != folder_path:
            # migrate vm to new folder
            if not check_mode:
                folder.MoveInto([vm])
            changed = True
        if (not module.params['se_vmw_power_on']) and \
                vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            if not check_mode:
                task = vm.PowerOffVM_Task()
                wait_for_tasks(si, [task])
            changed = True
        if module.params['se_vmw_power_on'] and vm.runtime.powerState == \
                vim.VirtualMachinePowerState.poweredOff:
            if not check_mode:
                task = vm.PowerOnVM_Task()
                wait_for_tasks(si, [task])
            changed = True

        if module.params.get('se_vmw_datastore', None):
            ds_names = []
            for datastore in vm.datastore:
                ds_names.append(datastore.name)
            if ds.name not in ds_names:
                module.fail_json(msg='VM datastore cant be modified')

        if module.params.get('se_vmw_mgmt_ip', None):
            ip_addresses = get_vm_ips(vm)
            if (ip_addresses
                    and not module.params['se_vmw_mgmt_ip'] in ip_addresses):
                module.fail_json(msg='VM static ip address cant be modified')
        if changed and not check_mode:
            module.exit_json(msg='A VM with the name %s updated successfully' %
                             (module.params['se_vmw_vm_name']),
                             changed=True)
        if changed and check_mode:
            module.exit_json(changed=True)
        else:
            module.exit_json(msg='A VM with the name %s is already present' %
                             (module.params['se_vmw_vm_name']))

    ova_file = module.params['se_vmw_ova_path']
    if module.params['se_vmw_ova_path'].startswith('http'):
        if requests.head(module.params['se_vmw_ova_path']).status_code != 200:
            module.fail_json(
                msg='SE OVA not found or not readable from specified URL path')
    elif (not os.path.isfile(ova_file) or not os.access(ova_file, os.R_OK)):
        module.fail_json(msg='SE OVA not found or not readable')

    ovftool_exec = '%s/ovftool' % module.params['ovftool_path']
    quoted_vcenter_user = quote(module.params['vcenter_user'])
    quoted_vcenter_password = quote(module.params['vcenter_password'])

    vi_string = 'vi://%s:%s@%s' % (quoted_vcenter_user,
                                   quoted_vcenter_password,
                                   module.params['vcenter_host'])
    vi_string += '/%s%s/%s' % (dc.name, compile_folder_path_for_object(cl),
                               cl.name)
    if host_name:
        vi_string += '/' + host_name
    command_tokens = [ovftool_exec]

    if module.params['se_vmw_power_on'] and not is_reconfigure_vm(module):
        command_tokens.append('--powerOn')
    if not module.params['ssl_verify']:
        command_tokens.append('--noSSLVerify')
    if check_mode:
        command_tokens.append('--verifyOnly')
    command_tokens.extend([
        '--acceptAllEulas', '--skipManifestCheck', '--allowExtraConfig',
        '--diskMode=%s' % module.params['se_vmw_disk_mode'],
        '--datastore=%s' % ds.name,
        '--name=%s' % module.params['se_vmw_vm_name']
    ])

    if ('se_vmw_ovf_networks' in module.params.keys()
            and module.params['se_vmw_ovf_networks'] is not None):
        d = module.params['se_vmw_ovf_networks']
        for key, network_item in d.items():
            command_tokens.append('--net:%s=%s' % (key, network_item))

    command_tokens.extend([
        '--prop:%s=%s' % ('AVICNTRL', module.params['se_master_ctl_ip']),
        '--prop:%s=%s' %
        ('AVICNTRL_AUTHTOKEN', module.params['se_auth_token']),
        '--prop:%s=%s' %
        ('AVICNTRL_CLUSTERUUID', module.params['se_cluster_uuid'])
    ])

    if module.params.get('se_vmw_mgmt_ip', None):
        command_tokens.append(
            '--prop:%s=%s' %
            ('avi.mgmt-ip.SE', module.params['se_vmw_mgmt_ip']))

    if module.params.get('se_vmw_mgmt_mask', None):
        command_tokens.append(
            '--prop:%s=%s' %
            ('avi.mgmt-mask.SE', module.params['se_vmw_mgmt_mask']))

    if module.params.get('se_vmw_default_gw', None):
        command_tokens.append(
            '--prop:%s=%s' %
            ('avi.default-gw.SE', module.params['se_vmw_default_gw']))

    if module.params.get('se_vmw_sysadmin_public_key', None):
        command_tokens.append(
            '--prop:%s=%s' %
            ('avi.sysadmin-public-key.SE',
             get_sysadmin_key(module.params['se_vmw_sysadmin_public_key'])))

    if module.params.get('se_vmw_ovf_properties', None):
        for key in module.params['se_vmw_ovf_properties'].keys():
            command_tokens.append(
                '--prop:%s=%s' %
                (key, module.params['se_vmw_ovf_properties'][key]))

    if ('se_vmw_vcenter_folder' in module.params
            and module.params['se_vmw_vcenter_folder'] is not None):
        command_tokens.append('--vmFolder=%s' %
                              module.params['se_vmw_vcenter_folder'])

    command_tokens.extend([ova_file, vi_string])
    ova_tool_result = module.run_command(command_tokens)

    if ova_tool_result[0] != 0:
        return module.fail_json(
            msg='Failed to deploy OVA, error message from ovftool is: %s '
            'for command %s' % (ova_tool_result[1], command_tokens))

    if is_reconfigure_vm(module):
        vm = get_vm_by_name(si, module.params['se_vmw_vm_name'])
        cspec = vim.vm.ConfigSpec()
        if is_update_cpu(module):
            cspec.numCPUs = module.params['se_vmw_number_of_cpus']
        if is_update_memory(module):
            cspec.memoryMB = module.params['se_vmw_memory']
        if is_reserve_memory(module):
            cspec.memoryAllocation = vim.ResourceAllocationInfo(
                reservation=module.params['se_vmw_memory_reserved'])
        if is_reserve_cpu(module):
            cspec.cpuAllocation = vim.ResourceAllocationInfo(
                reservation=module.params['se_vmw_cpu_reserved'])
        if is_resize_disk(module):
            disk = None
            for device in vm.config.hardware.device:
                if isinstance(device, vim.vm.device.VirtualDisk):
                    disk = device
                    break
            if disk is not None:
                disk.capacityInKB = module.params[
                    'se_vmw_disk_size'] * 1024 * 1024
                devSpec = vim.vm.device.VirtualDeviceSpec(device=disk,
                                                          operation="edit")
                cspec.deviceChange.append(devSpec)
        wait_for_tasks(si, [vm.Reconfigure(cspec)])

        task = vm.PowerOnVM_Task()
        wait_for_tasks(si, [task])

    return module.exit_json(changed=True, ova_tool_result=ova_tool_result)
Example 22
def main():
    module = AnsibleModule(
        argument_spec=dict(
            ovftool_path=dict(required=True, type='str'),
            vcenter_host=dict(required=True, type='str'),
            vcenter_user=dict(required=True, type='str'),
            vcenter_password=dict(required=True, type='str', no_log=True),
            ssl_verify=dict(required=False, type='bool', default=False),
            state=dict(required=False, type='str', default='present'),
            con_datacenter=dict(required=False, type='str'),
            con_cluster=dict(required=False, type='str'),
            con_datastore=dict(required=False, type='str'),
            con_mgmt_network=dict(required=True, type='str'),
            con_disk_mode=dict(required=False, type='str', default='thin'),
            con_ova_path=dict(required=True, type='str'),
            con_vm_name=dict(required=True, type='str'),
            con_power_on=dict(required=False, type='bool', default=True),
            con_vcenter_folder=dict(required=False, type='str'),
            con_mgmt_ip=dict(required=False, type='str'),
            con_mgmt_mask=dict(required=False, type='str'),
            con_default_gw=dict(required=False, type='str'),
            con_sysadmin_public_key=dict(required=False, type='str'),
            con_number_of_cpus=dict(required=False, type='int'),
            con_cpu_reserved=dict(required=False, type='int'),
            con_memory=dict(required=False, type='int'),
            con_memory_reserved=dict(required=False, type='int'),
            con_disk_size=dict(required=False, type='int'),
            con_ovf_properties=dict(required=False, type='dict'),
            # Max time to wait for controller up state
            con_wait_time=dict(required=False, type='int', default=3600),
            # Retry after every round_wait interval to check the controller state.
            round_wait=dict(required=False, type='int', default=10),
        ),
        supports_check_mode=True,
    )
    try:
        si = SmartConnectNoSSL(host=module.params['vcenter_host'],
                               user=module.params['vcenter_user'],
                               pwd=module.params['vcenter_password'])
        atexit.register(Disconnect, si)
    except vim.fault.InvalidLogin:
        return module.fail_json(
            msg='exception while connecting to vCenter, login failure, '
                'check username and password')
    except requests.exceptions.ConnectionError:
        return module.fail_json(
            msg='exception while connecting to vCenter, check hostname, '
                'FQDN or IP')
    check_mode = module.check_mode
    if module.params['state'] == 'absent':
        vm = get_vm_by_name(si, module.params['con_vm_name'])

        if vm is None:
            return module.exit_json(msg='A VM with the name %s not found' % (
                module.params['con_vm_name']))

        if check_mode:
            return module.exit_json(msg='A VM with the name %s found' % (
                module.params['con_vm_name']), changed=True)

        if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            task = vm.PowerOffVM_Task()
            wait_for_tasks(si, [task])

        task = vm.Destroy_Task()
        wait_for_tasks(si, [task])

        return module.exit_json(msg='A VM with the name %s deleted successfully'
                                    % (module.params['con_vm_name']))

    if module.params.get('con_datacenter', None):
        dc = get_dc(si, module.params['con_datacenter'])
    else:
        dc = si.content.rootFolder.childEntity[0]

    if module.params.get('con_cluster', None):
        cl = get_cluster(si, dc, module.params['con_cluster'])
    else:
        cl = get_first_cluster(si, dc)

    if module.params.get('con_datastore', None):
        ds = get_ds(cl, module.params['con_datastore'])
    else:
        ds = get_largest_free_ds(cl)

    if is_vm_exist(si, cl, module.params['con_vm_name']):
        vm = get_vm_by_name(si, module.params['con_vm_name'])
        vm_path = compile_folder_path_for_object(vm)
        folder = get_folder_by_path(si, dc, module.params['con_vcenter_folder'])
        folder_path = compile_folder_path_for_object(folder)
        changed = False
        if vm_path != folder_path:
            # migrate vm to new folder
            if not check_mode:
                folder.MoveInto([vm])
            changed = True
        if (not module.params['con_power_on']) and \
                vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            if not check_mode:
                task = vm.PowerOffVM_Task()
                wait_for_tasks(si, [task])
            changed = True
        if module.params['con_power_on'] and vm.runtime.powerState == \
                vim.VirtualMachinePowerState.poweredOff:
            if not check_mode:
                task = vm.PowerOnVM_Task()
                wait_for_tasks(si, [task])
            changed = True

        if module.params.get('con_datastore', None):
            ds_names = []
            for datastore in vm.datastore:
                ds_names.append(datastore.name)
            if ds.name not in ds_names:
                module.fail_json(msg='VM datastore cant be modified')

        if module.params.get('con_mgmt_ip', None):
            ip_addresses = get_vm_ips(vm)
            if (ip_addresses and
                    not module.params['con_mgmt_ip'] in ip_addresses):
                module.fail_json(msg='VM static ip address cant be modified')

        if is_reconfigure_vm(module):
            if not check_mode:
                vmSummary = vm.summary.config
                cspec = vim.vm.ConfigSpec()

                disk = None
                if is_resize_disk(module):
                    for device in vm.config.hardware.device:
                        if isinstance(device, vim.vm.device.VirtualDisk):
                            disk = device
                            break

                if vmSummary.numCpu != module.params['con_number_of_cpus'] or \
                        vmSummary.memorySizeMB != module.params['con_memory'] or \
                        vmSummary.memoryReservation != module.params['con_memory_reserved'] or \
                        vmSummary.cpuReservation != module.params['con_cpu_reserved'] or \
                        (disk is not None and disk.capacityInKB != module.params['con_disk_size'] * 1024 * 1024):
                    if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                        task = vm.PowerOffVM_Task()
                        wait_for_tasks(si, [task])
                    if is_update_cpu(module):
                        if vmSummary.numCpu != module.params['con_number_of_cpus']:
                            cspec.numCPUs = module.params['con_number_of_cpus']
                            changed = True
                    if is_update_memory(module):
                        if vmSummary.memorySizeMB != module.params['con_memory']:
                            cspec.memoryMB = module.params['con_memory']
                            changed = True
                    if is_reserve_memory(module):
                        if vmSummary.memoryReservation != module.params['con_memory_reserved']:
                            cspec.memoryAllocation = vim.ResourceAllocationInfo(
                                reservation=module.params['con_memory_reserved'])
                            changed = True
                    if is_reserve_cpu(module):
                        if vmSummary.cpuReservation != module.params['con_cpu_reserved']:
                            cspec.cpuAllocation = vim.ResourceAllocationInfo(
                                reservation=module.params['con_cpu_reserved'])
                            changed = True
                    if is_resize_disk(module):
                        if disk.capacityInKB != module.params['con_disk_size'] * 1024 * 1024:
                            disk.capacityInKB = module.params['con_disk_size'] * 1024 * 1024
                            devSpec = vim.vm.device.VirtualDeviceSpec(
                                device=disk, operation="edit")
                            cspec.deviceChange.append(devSpec)
                            changed = True
                    WaitForTasks([vm.Reconfigure(cspec)], si=si)

                    if module.params['con_power_on']:
                        task = vm.PowerOnVM_Task()
                        WaitForTasks([task], si=si)

        if changed and not check_mode:
            module.exit_json(msg='A VM with the name %s updated successfully' %
                                 (module.params['con_vm_name']), changed=True)
        if changed and check_mode:
            module.exit_json(changed=True)
        else:
            module.exit_json(
                msg='A VM with the name %s is already present' % (
                    module.params['con_vm_name']))

    if module.params['con_ova_path'].startswith('http'):
        if requests.head(module.params['con_ova_path']).status_code != 200:
            module.fail_json(
                msg='Controller OVA not found or not readable from specified URL path')
    else:
        if (not os.path.isfile(module.params['con_ova_path']) or
                not os.access(module.params['con_ova_path'], os.R_OK)):
            module.fail_json(msg='Controller OVA not found or not readable')

    ovftool_exec = '%s/ovftool' % module.params['ovftool_path']
    ova_file = module.params['con_ova_path']
    quoted_vcenter_user = quote(module.params['vcenter_user'])
    quoted_vcenter_pass = quote(module.params['vcenter_password'])
    vi_string = 'vi://%s:%s@%s' % (
        quoted_vcenter_user, quoted_vcenter_pass,
        module.params['vcenter_host'])
    vi_string += '/%s%s/%s' % (dc.name, compile_folder_path_for_object(cl),
                               cl.name)
    command_tokens = [ovftool_exec]

    if module.params['con_power_on'] and not is_reconfigure_vm(module):
        command_tokens.append('--powerOn')
    if not module.params['ssl_verify']:
        command_tokens.append('--noSSLVerify')
    if check_mode:
        command_tokens.append('--verifyOnly')
    command_tokens.extend([
        '--acceptAllEulas',
        '--skipManifestCheck',
        '--allowExtraConfig',
        '--diskMode=%s' % module.params['con_disk_mode'],
        '--datastore=%s' % ds.name,
        '--name=%s' % module.params['con_vm_name']
    ])

    if ('ovf_network_name' in module.params.keys() and
            module.params['ovf_network_name'] is not None and
            len(module.params['ovf_network_name']) > 0):
            try:
                d = json.loads(
                    module.params['ovf_network_name'].replace("'", "\""))
                for key, network_item in d.iteritems():
                    command_tokens.append('--net:%s=%s' % (key, network_item))
            except ValueError:
                command_tokens.append('--net:%s=%s' % (
                    module.params['ovf_network_name'],
                    module.params['con_mgmt_network']))
    else:
        command_tokens.append(
            '--network=%s' % module.params['con_mgmt_network'])

    if module.params.get('con_mgmt_ip', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.mgmt-ip.CONTROLLER', module.params['con_mgmt_ip']))

    if module.params.get('con_mgmt_mask', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.mgmt-mask.CONTROLLER', module.params['con_mgmt_mask']))

    if module.params.get('con_default_gw', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.default-gw.CONTROLLER', module.params['con_default_gw']))

    if module.params.get('con_sysadmin_public_key', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.sysadmin-public-key.CONTROLLER',
            get_sysadmin_key(module.params['con_sysadmin_public_key'])))

    if module.params.get('con_ovf_properties', None):
        for key in module.params['con_ovf_properties'].keys():
            command_tokens.append(
                '--prop:%s=%s' % (
                    key, module.params['con_ovf_properties'][key]))

    if ('con_vcenter_folder' in module.params and
            module.params['con_vcenter_folder'] is not None):
        command_tokens.append(
            '--vmFolder=%s' % module.params['con_vcenter_folder'])

    command_tokens.extend([ova_file, vi_string])
    ova_tool_result = module.run_command(command_tokens)

    if ova_tool_result[0] != 0:
        return module.fail_json(
            msg='Failed to deploy OVA, error message from ovftool is: %s '
                'for command %s' % (ova_tool_result[1], command_tokens))


    vm = None
    if is_reconfigure_vm(module):
        vm = get_vm_by_name(si, module.params['con_vm_name'])
        cspec = vim.vm.ConfigSpec()
        if is_update_cpu(module):
            cspec.numCPUs = module.params['con_number_of_cpus']
        if is_update_memory(module):
            cspec.memoryMB = module.params['con_memory']
        if is_reserve_memory(module):
            cspec.memoryAllocation = vim.ResourceAllocationInfo(
                reservation=module.params['con_memory_reserved'])
        if is_reserve_cpu(module):
            cspec.cpuAllocation = vim.ResourceAllocationInfo(
                reservation=module.params['con_cpu_reserved'])
        if is_resize_disk(module):
            disk = None
            for device in vm.config.hardware.device:
                if isinstance(device, vim.vm.device.VirtualDisk):
                    disk = device
                    break
            if disk is not None:
                disk.capacityInKB = module.params['con_disk_size'] * 1024 * 1024
                devSpec = vim.vm.device.VirtualDeviceSpec(
                    device=disk, operation="edit")
                cspec.deviceChange.append(devSpec)
        WaitForTasks([vm.Reconfigure(cspec)], si=si)

        task = vm.PowerOnVM_Task()
        WaitForTasks([task], si=si)

    if not vm:
        vm = get_vm_by_name(si, module.params['con_vm_name'])

    if not module.params['con_mgmt_ip']:
        interval = 15
        timeout = 300
        controller_ip = None
        while timeout > 0:
            controller_ip = get_vm_ip_by_network(vm, module.params['con_mgmt_network'])
            if controller_ip:
                controller_ip = controller_ip[0]
                break
            time.sleep(interval)
            timeout -= interval
    else:
        controller_ip = module.params['con_mgmt_ip']

    # Wait for the controller to come up within the given con_wait_time
    if controller_ip:
        controller_up = controller_wait(controller_ip, module.params['round_wait'],
                                    module.params['con_wait_time'])
        if not controller_up:
            return module.fail_json(
                msg='Something wrong with the controller. The Controller is not in the up state.')
    return module.exit_json(changed=True, ova_tool_result=ova_tool_result)