Example #1
    def delete_vm_by_name(self, name, remove_files=True):
        """
        Unregisters a VM by name and removes its files from the datastore.
        @name is the VM name.
        @remove_files - if True (default) deletes the VM files from the datastore.
        """
        statusLine = ''
        success = False

        if not self.__logged:
            raise VIException("Must call 'connect' before invoking this method", FaultTypes.NOT_CONNECTED)
        try:
            # Get VM
            vm = self.get_vm_by_name(name)

            if remove_files:
                # Invoke Destroy_Task
                request = VI.Destroy_TaskRequestMsg()

                _this = request.new__this(vm._mor)
                _this.set_attribute_type(vm._mor.get_attribute_type())
                request.set_element__this(_this)
                ret = self._proxy.Destroy_Task(request)._returnval
                task = VITask(ret, self)
                
                # Wait for the task to finish
                status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

                if status == task.STATE_SUCCESS:
                    statusLine = "VM successfully unregistered and deleted from datastore"
                    success = True

                elif status == task.STATE_ERROR:
                    statusLine = "Error removing vm: {}".format(task.get_error_message())
                    success = False

            else:
                # Invoke UnregisterVMRequestMsg
                request = VI.UnregisterVMRequestMsg()

                _this = request.new__this(vm._mor)
                _this.set_attribute_type(vm._mor.get_attribute_type())
                request.set_element__this(_this)
                ret = self._proxy.UnregisterVM(request)
                # UnregisterVM is a synchronous call; there is no task to wait on

                statusLine = "VM successfully unregistered (files still on datastore)"
                success = True

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)

        # Returning from a 'finally' block would swallow the exception raised
        # above, so return normally instead.
        return success, statusLine
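
A minimal usage sketch for the method above, assuming it is defined on a
pysphere VIServer subclass (the host name and credentials are placeholders):

    from pysphere import VIServer

    server = VIServer()  # or the subclass that defines delete_vm_by_name
    server.connect("vcenter.example.com", "administrator", "secret")
    success, status = server.delete_vm_by_name("test-vm", remove_files=True)
    print(status)
    server.disconnect()
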
Example #2
def delete_vm(vm):
    vm_name = vm.get_property('name', from_cache=False)
    log("Preparing to delete VM %s..." % vm_name)
    vm_status = vm.get_status()
    log("VM status: %s" % vm_status)
    if vm_status in ("POWERED OFF", "POWERING OFF"):
        log("VM power state: %s" % vm_status)
    else:
        log("Powering off VM %s..." % vm_name)
        vm.power_off()

    log("Deleting VM %s..." % vm_name)
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    ret = server._proxy.Destroy_Task(request)._returnval

    # Wait for the delete task to finish
    task = VITask(ret, server)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        log("VM successfully deleted from disk.")
    elif status == task.STATE_ERROR:
        error_msg = "Error while deleting VM: " + task.get_error_message()
        raise Exception(error_msg)
Example #3
def delete_vm(vsphere_client, module, guest, vm, force):
    try:

        if vm.is_powered_on():
            if force:
                try:
                    vm.power_off(sync_run=True)
                    vm.get_status()

                except Exception as e:
                    module.fail_json(
                        msg='Failed to shutdown vm %s: %s' % (guest, e))
            else:
                module.fail_json(
                    msg='You must either shut the VM down first or use force')

        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vsphere_client._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, vsphere_client)

        # Wait for the task to finish
        status = task.wait_for_state(
            [task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_ERROR:
            vsphere_client.disconnect()
            module.fail_json(msg="Error removing vm: %s %s" %
                             task.get_error_message())
        module.exit_json(changed=True, changes="VM %s deleted" % guest)
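
A hedged sketch of how this Ansible-module helper might be wired up; the
surrounding module normally supplies these objects, so every name below is
illustrative:

    from pysphere import VIServer

    vsphere_client = VIServer()
    vsphere_client.connect(hostname, username, password)  # from module params
    vm = vsphere_client.get_vm_by_name(guest)
    delete_vm(vsphere_client, module, guest, vm, force=True)
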
Example #4
    def rename(self, new_name, sync_run=True):
        """
        Renames this managed entity.
          * new_name: the new name. Any / (slash) or \ (backslash) character
            used in this name element will be escaped; likewise any %
            (percent) character, unless it starts an escape sequence. A slash
            is escaped as %2F or %2f, a backslash as %5C or %5c, and a
            percent as %25.
          * sync_run: (default True), If False does not wait for the task to
            finish and returns an instance of a VITask for the user to monitor
            its progress 
        """
        try:
            request = VI.Rename_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_newName(new_name)

            task = self._server._proxy.Rename_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state(
                    [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
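
For example (a sketch, assuming a connected VIServer and that the method
above is available on the VM object), the two modes differ only in sync_run:

    vm = server.get_vm_by_name("test-vm")
    vm.rename("test-vm-renamed")                    # blocks until the task completes
    task = vm.rename("test-vm-2", sync_run=False)   # returns a VITask
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
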
Example #5
    def destroy(self, sync_run=True):
        """
        Destroys this object, deleting its contents and removing it from its 
        parent folder (if any)
        * sync_run: (default True), If False does not wait for the task to
            finish and returns an instance of a VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)

            task = self._server._proxy.Destroy_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state(
                    [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task
        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
Example #6
    def delete_instance(self, name=None):
        # Check if name is passed, if not extract from vm_json
        if not name:
            name = self.vm_json.get('name')

        # Initialise vcenter handle
        vcenter_handle = self.server
        try:
            vm = vcenter_handle.get_vm_by_name(name)
        except Exception:
            logger.info('VM %s not present in vCenter. This is OK' % name)
            return
        # Power off if not already
        if not vm.is_powered_off():
            vm.power_off()
        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vcenter_handle._proxy.Destroy_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vcenter_handle)

        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            logger.info('VM %s successfully deleted from disk' % name)
        elif status == task.STATE_ERROR:
            logger.info('Error removing vm: %s' % task.get_error_message())
Example #7
    def register_vm(self, path, name=None, sync_run=True, folder=None,
                    template=False, resourcepool=None, host=None):
        """Adds an existing virtual machine to the folder.
        @path: a datastore path to the virtual machine.
            Example "[datastore] path/to/machine.vmx".
        @name: the name to be assigned to the virtual machine.
            If this parameter is not set, the displayName configuration
            parameter of the virtual machine is used.
        @sync_run: if True (default) waits for the task to finish and raises
            an exception if the task didn't succeed. If @sync_run is set to
            False the task is started and a VITask instance is returned.
        @folder: folder in which to register the virtual machine.
        @template: Flag to specify whether or not the virtual machine
            should be marked as a template.
        @resourcepool: MOR of the resource pool to which the virtual machine should
            be attached. If imported as a template, this parameter is not set.
        @host: The target host on which the virtual machine will run. This
            parameter must specify a host that is a member of the ComputeResource
            indirectly specified by the pool. For a stand-alone host or a cluster
            with DRS, the parameter can be omitted, and the system selects a default.
        """
        if not folder:
            folders = self._get_managed_objects(MORTypes.Folder)
            folder = [_mor for _mor, _name in folders.items()
                          if _name == 'vm'][0]
        try:
            request = VI.RegisterVM_TaskRequestMsg()
            _this = request.new__this(folder)
            _this.set_attribute_type(folder.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_path(path)
            if name:
                request.set_element_name(name)
            request.set_element_asTemplate(template)
            if resourcepool:
                pool = request.new_pool(resourcepool)
                pool.set_attribute_type(resourcepool.get_attribute_type())
                request.set_element_pool(pool)
            if host:
                if not VIMor.is_mor(host):
                    host = VIMor(host, MORTypes.HostSystem)
                hs = request.new_host(host)
                hs.set_attribute_type(host.get_attribute_type())
                request.set_element_host(hs)

            task = self._proxy.RegisterVM_Task(request)._returnval
            vi_task = VITask(task, self)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
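
A usage sketch, assuming a connected VIServer exposing the method above (the
datastore path and VM name are placeholders):

    server.register_vm("[datastore1] test-vm/test-vm.vmx", name="test-vm")
    # The sync path returns None, so fetch the VM afterwards if needed:
    vm = server.get_vm_by_path("[datastore1] test-vm/test-vm.vmx")
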
Example #8
 def change_dvs_net(self, vm, pg_map, dc_name):
     """Takes a VIServer and VIVirtualMachine object and reconfigures
     dVS portgroups according to the mappings in the pg_map dict. The
     pg_map dict must contain the source portgroup as key and the
     destination portgroup as value"""
     # Find virtual NIC devices
     vm_obj = self.__s.get_vm_by_name(vm)
     uuid = self.GetDvsUuid(dc_name, pg_map)
     if not vm_obj:
         raise Exception("VM %s not found" % vm)
     net_device = []
     for dev in vm_obj.properties.config.hardware.device:
         if dev._type in ["VirtualE1000", "VirtualE1000e",
                          "VirtualPCNet32", "VirtualVmxnet",
                          "VirtualVmxnet2", "VirtualVmxnet3"]:
             net_device.append(dev)

     # Throw an exception if no NIC was found
     if len(net_device) == 0:
         raise Exception("The VM seems to lack a virtual NIC")
     try:
         # Use pg_map to set the new portgroups
         for dev in net_device:
             old_portgroup = dev.backing.port.portgroupKey
             if old_portgroup in pg_map:
                 dev.backing.port._obj.set_element_portgroupKey(pg_map[old_portgroup])
                 dev.backing.port._obj.set_element_portKey('')
                 dev.backing.port._obj.set_element_switchUuid(uuid)
     
         # Invoke ReconfigVM_Task
         request = VI.ReconfigVM_TaskRequestMsg()
         _this = request.new__this(vm_obj._mor)
         _this.set_attribute_type(vm_obj._mor.get_attribute_type())
         request.set_element__this(_this)
     
         # Build a list of device change spec objects
         devs_changed = []
         for dev in net_device:
             spec = request.new_spec()
             dev_change = spec.new_deviceChange()
             dev_change.set_element_device(dev._obj)
             dev_change.set_element_operation("edit")
             devs_changed.append(dev_change)
     
         # Submit the device change list
         spec.set_element_deviceChange(devs_changed)
         request.set_element_spec(spec)
         ret = self.__s._proxy.ReconfigVM_Task(request)._returnval
     
         # Wait for the task to finish
         task = VITask(ret, self.__s)
     
         status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
         if status == task.STATE_ERROR:
             raise Exception(task.get_error_message())
     except Exception as e:
         item = 'Failed to change the VM network'
         detail = str(e)
         key = 'change_network'
         self.ReportError(item, detail, key)
Example #9
    def destroy_node(self, node, ex_remove_files=True):
        """
        :param ex_remove_files: Remove all the files from the datastore.
        :type ex_remove_files: ``bool``
        """
        vm = self._get_vm_for_node(node=node)

        server = self.connection.client

        # Based on code from
        # https://pypi.python.org/pypi/pyxenter
        if ex_remove_files:
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)

            # pylint: disable=no-member
            ret = server._proxy.Destroy_Task(request)._returnval
            # pylint: enable=no-member
            task = VITask(ret, server)

            # Wait for the task to finish
            status = task.wait_for_state(
                [task.STATE_SUCCESS, task.STATE_ERROR])

            if status == task.STATE_ERROR:
                raise LibcloudError('Error destroying node: %s' %
                                    (task.get_error_message()))
        else:
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            # UnregisterVM is a synchronous call; there is no task to wait on

        return True
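
Sketch of calling this libcloud driver method (driver construction is elided
and the node name is a placeholder):

    node = [n for n in driver.list_nodes() if n.name == "test-vm"][0]
    driver.destroy_node(node, ex_remove_files=False)  # unregister, keep files
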
Example #10
 def remove_vnic(self, label):
     '''Function removes a vnic network from an existing vcenter.
     Arguments:
     vm: vm from which vnic should be removed,
     label: name of vnic
     **Note**
     VNICs must be removed each time this class is instantiated. They are
     stored in a temporary dictionary, so if they are not removed by the
     end of this class instance's lifetime, they will have to be removed
     manually in vCenter.
     '''
     vm = self.vm_json.get('name')
     vm_obj = self.server.get_vm_by_name(vm, self.datacenter)
     if not vm_obj:
         raise Exception("VM %s not found" % vm)
     net_device = None
     if (vm, label) not in self.vm_vnics:
         raise Exception("vcenter removeVNIC error: vm_vnics not found: " +
                         str((vm, label)))
     # Find Virtual Nic device
     for dev in vm_obj.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ] and dev.deviceInfo.label == self.vm_vnics[(vm, label)][0]):
             net_device = dev._obj
             break
     if not net_device:
         raise Exception("NIC not found")
     # Reconfigure
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("remove")
     dev_change.set_element_device(net_device)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         logger.info("Removing vnic %s on vm %s succeeded" % (label, vm))
     elif status == task.STATE_ERROR:
         raise Exception("Error removing vnic %s on vm %s: %s" %
                         (label, vm, task.get_error_message()))
Example #11
    def delete_vm(self, vm_name):
        vm = self._get_vm(vm_name)

        if vm.is_powered_on():
            self.stop_vm(vm_name)

        # When pysphere moves up to 0.1.8, we can just do:
        # vm.destroy()
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        rtn = self.api._proxy.Destroy_Task(request)._returnval

        task = VITask(rtn, self.api)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        return status == task.STATE_SUCCESS
Example #12
def destroyGuest(host_con, guest_name):
    powerOffGuest(host_con, guest_name)
    try:
        vm = host_con.get_vm_by_name(guest_name)
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = host_con._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, host_con)
        print('Waiting for VM to be deleted')
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            result = 'Successfully removed guest: %s' % guest_name
        elif status == task.STATE_ERROR:
            result = 'Failed to remove VM: %s\n%s' % (guest_name,
                                                      task.get_error_message())
    except Exception as e:
        result = 'Failed to remove VM: %s\n%s' % (guest_name, str(e))
    return result
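
For instance, assuming powerOffGuest is defined alongside this helper (the
connection details are placeholders):

    from pysphere import VIServer

    host_con = VIServer()
    host_con.connect("esx.example.com", "root", "secret")
    print(destroyGuest(host_con, "test-guest"))
    host_con.disconnect()
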
Example #13
 def __execute_net_device_reconfig_task(self, vm_obj, net_device):
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("edit")
     dev_change.set_element_device(net_device)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         logger.info("Net device reconfig task on vm %s successful " %
                     (vm_obj.properties.name))
     elif status == task.STATE_ERROR:
         raise Exception("Error: Net device reconfig task on vm %s msg %s" %
                         (vm_obj.properties.name, task.get_error_message()))
Example #14
 def vm_delete(self, vminstance):
     try:
         for vms in vminstance:
             vm = vms["obj"]
             if vm.is_powered_on():
                 # Wait for power-off so Destroy_Task doesn't race with it
                 vm.power_off(sync_run=True)
             # Invoke Destroy_Task
             request = VI.Destroy_TaskRequestMsg()
             _this = request.new__this(vm._mor)
             _this.set_attribute_type(vm._mor.get_attribute_type())
             request.set_element__this(_this)
             ret = self.server._proxy.Destroy_Task(request)._returnval
             # Wait for the task to finish
             task = VITask(ret, self.server)
             status = task.wait_for_state(
                 [task.STATE_SUCCESS, task.STATE_ERROR])
             if status == task.STATE_ERROR:
                 return False
         return True
     except Exception:
         return False
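
The vminstance argument is expected to be a list of dicts whose "obj" value
is a VIVirtualMachine; a hypothetical call (manager being the owning object)
might look like:

    vm = server.get_vm_by_name("test-vm")   # assumes a connected VIServer
    ok = manager.vm_delete([{"obj": vm}])
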
Example #15
def change_dvs_net(s, vm_obj, hostname, dstlabel, curlabel):
    """Takes a VIServer and VIVirtualMachine object and reconfigures
    dVS portgroups according to the mappings in the pg_map dict. The
    pg_map dict must contain the source portgroup as key and the
    destination portgroup as value"""

    # Find virtual NIC devices
    pg_map = {}

    if not vm_obj:
        raise Exception("VM '%s' not found" % hostname)
    net_device = []
    for dev in vm_obj.properties.config.hardware.device:
        if dev._type in [
                "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                "VirtualVmxnet", "VirtualVmxnet2", "VirtualVmxnet3"
        ]:
            net_device.append(dev)
    if len(net_device) == 0:
        raise Exception("The VM seems to lack a virtual NIC")

    # Lets get the information for the port group
    network_name = dstlabel
    network_name2 = curlabel

    for ds_mor, name in s.get_datacenters().items():
        dcprops = VIProperty(s, ds_mor)
        break

    # networkFolder managed object reference
    nfmor = dcprops.networkFolder._obj
    dvpg_mors = s._retrieve_properties_traversal(
        property_names=['name', 'key'],
        from_node=nfmor,
        obj_type='DistributedVirtualPortgroup')

    # Get the portgroup managed object.
    dvpg_mor = None
    for dvpg in dvpg_mors:
        if dvpg_mor:
            break
        for p in dvpg.PropSet:
            if p.Name == "name" and p.Val == network_name:
                dvpg_mor = dvpg
            if dvpg_mor:
                break

    # Get the portgroup managed object.
    dvpg_mor2 = None
    for dvpg2 in dvpg_mors:
        if dvpg_mor2:
            break
        for p in dvpg2.PropSet:
            if p.Name == "name" and p.Val == network_name2:
                dvpg_mor2 = dvpg2
            if dvpg_mor2:
                break

    if dvpg_mor is None:
        print("Didn't find the dvpg %s, exiting now" % network_name)
        exit()

    if dvpg_mor2 is None:
        print("Didn't find the dvpg %s, exiting now" % network_name2)
        exit()

    # Get the portgroup key
    portgroupKey = None
    for p in dvpg_mor.PropSet:
        if p.Name == "key":
            portgroupKey = p.Val
    portgroupKey2 = None
    for p in dvpg_mor2.PropSet:
        if p.Name == "key":
            portgroupKey2 = p.Val

    # Use pg_map to set the new Portgroups
    pg_map[portgroupKey2] = portgroupKey
    for dev in net_device:
        old_portgroup = dev.backing.port.portgroupKey
        if old_portgroup in pg_map:
            dev.backing.port._obj.set_element_portgroupKey(
                pg_map[old_portgroup])
            dev.backing.port._obj.set_element_portKey('')

    # Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)

    # Build a list of device change spec objects
    devs_changed = []
    for dev in net_device:
        spec = request.new_spec()
        dev_change = spec.new_deviceChange()
        dev_change.set_element_device(dev._obj)
        dev_change.set_element_operation("edit")
        devs_changed.append(dev_change)

    # Submit the device change list
    spec.set_element_deviceChange(devs_changed)
    request.set_element_spec(spec)
    ret = s._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, s)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print("SUCCESS --> VM '%s' successfully reconfigured!" % hostname)
    elif status == task.STATE_ERROR:
        print("ERROR --> Something went wrong reconfiguring vm '%s': %s"
              % (hostname, task.get_error_message()))
Example #16
            for vm in vm_mors:
                if vm not in folder_children:
                    temp_mors.append(vm)
            vm_mors = temp_mors
            if len(vm_mors) == 0:
                viserver.disconnect()
                module.exit_json(changed=False)
        except Exception as e:
            viserver.disconnect()
            module.fail_json(msg=str(e))

        try:
            req = VI.MoveIntoFolder_TaskRequestMsg()
            req.set_element__this(folder_mor)
            req.set_element_list(vm_mors)
            task = VITask(
                viserver._proxy.MoveIntoFolder_Task(req).Returnval, viserver)
            task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

            if task.get_state() == task.STATE_ERROR:
                viserver.disconnect()
                module.fail_json(
                    msg="Error moving vm: %s to folder %s. Error: %s" %
                    (found_vms, json.dumps(folder_structure),
                     task.get_error_message()))
            else:
                changed = True
        except Exception as e:
            viserver.disconnect()
            module.fail_json(msg="Error Requesting VM Move: %s for VM: %s" %
                             (found_vms, json.dumps(folder_structure), str(e)))
Example #17
    #create vm request
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    new_vmf_mor = create_vm_request.new__this(vmf_mor)
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
    new_rp_mor = create_vm_request.new_pool(rp_mor)
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type())
    new_host_mor = create_vm_request.new_host(host_mor)
    new_host_mor.set_attribute_type(host_mor.get_attribute_type())
    create_vm_request.set_element__this(new_vmf_mor)
    create_vm_request.set_element_pool(new_rp_mor)
    create_vm_request.set_element_host(new_host_mor)

    #finally actually create the guest :)
    task_mor = host_con._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(task_mor, host_con)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

    if task.get_state() == task.STATE_ERROR:
        return "Cannot create guest: " + task.get_error_message()
    else:
        return "Succesfully created guest: " + guest_name


def getMac(host_con, guest_name):
    vm = host_con.get_vm_by_name(guest_name)
    net = vm.get_property('net', from_cache=False)
    if net:
        for interface in net:
            mac = interface.get('mac_address', None)
            if mac:
                return mac
    return None
Example #18
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False
    poweron = vm.is_powered_on()

    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    # Change Memory
    if 'memory_mb' in vm_hardware:

        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not memoryHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not memoryHotAddEnabled:
                        module.fail_json(
                            msg="memoryHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and memory shrink
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        module.fail_json(
                            msg="Cannot lower memory on a live VM. force is "
                            "required for shutdown")

            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']

    # ====( Config CPU )====#
    if 'num_cpus' in vm_hardware:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not cpuHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not cpuHotAddEnabled:
                        module.fail_json(
                            msg="cpuHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and cpu shrink without hot remove
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            module.fail_json(
                                msg="Cannot lower CPU on a live VM without "
                                "cpuHotRemove. force is required for shutdown")

            spec.set_element_numCPUs(int(vm_hardware['num_cpus']))

            changes['cpu'] = vm_hardware['num_cpus']

    if changes:

        if shutdown and vm.is_powered_on():
            try:
                vm.power_off(sync_run=True)
                vm.get_status()

            except Exception as e:
                module.fail_json(
                    msg='Failed to shutdown vm %s: %s' % (guest, e)
                )

        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            changed = True
        elif status == task.STATE_ERROR:
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())

        if vm.is_powered_off() and poweron:
            try:
                vm.power_on(sync_run=True)
            except Exception as e:
                module.fail_json(
                    msg='Failed to power on vm %s : %s' % (guest, e)
                )
Example #19
 def add_vnic(self, network):
     '''
     addVNIC adds a vnic to the desired vm_name
     Arguments:
     vm_name is the name of the vm that you wish to add the vnic to.
     network: the name of the vnic that you are adding
     **Note**
     VNICs must be removed each time this class is instantiated. They are
     stored in a temporary dictionary, so if they are not removed by the
     end of this class instance's lifetime, they will have to be removed
     manually in vCenter.
     '''
     vm_name = self.vm_json.get('name')
     # datacentername = "Dev_Datacenter"
     # hostname = "esx14.mgt.hawaii.edu"
     network_name = network
     s = self.server
     dcmor = None
     datacenters = s.get_datacenters()
     logger.info('datacenters: %s' % datacenters)
     # "GET INITIAL PROPERTIES AND OBJECTS"
     for i in datacenters:
         vm_list = s.get_registered_vms(i)  # list of vms
         for k in vm_list:
             if vm_name in k:
                 vm = k
                 dcmor = i
                 break
     if dcmor is None:
         logger.info("Datacenters: %s " % (datacenters))
         for k, v in datacenters.items():
             # When a VM is renamed, pysphere query to vcenter does not seem
             # to find the new vm name in any data center. So when we don't
             # find a vm in any data center, we assume it belongs in
             # Sunnyvale datacenter.
             # TODO(bindu): add an option to testbed file to pass this as
             # an option.
             if v == 'Sunnyvale':
                 logger.info('Datacenter for Sunnyvale %s' % k)
                 logger.info('Failed to find VM %s in any data center' %
                             vm_name)
                 logger.info(
                     '   VM might have been renamed. Assume datacenter Sunnyvale'
                 )
                 dcmor = k
                 break
         # raise Exception("Failed to find VM %s in any data center" % (vm_name))
     dcprops = VIProperty(s, dcmor)
     # "networkFolder managed object reference"
     nfmor = dcprops.networkFolder._obj
     dvpg_mors = s._retrieve_properties_traversal(
         property_names=['name', 'key'],
         from_node=nfmor,
         obj_type='DistributedVirtualPortgroup')
     # "get the portgroup managed object."
     dvpg_mor = None
     for dvpg in dvpg_mors:
         if dvpg_mor:
             break
         for p in dvpg.PropSet:
             if p.Name == "name" and p.Val == network_name:
                 dvpg_mor = dvpg
             if dvpg_mor:
                 break
     if dvpg_mor is None:
         raise Exception("Didn't find the dvpg %s, exiting now" %
                         (network_name))
     # "Get the portgroup key"
     portgroupKey = None
     for p in dvpg_mor.PropSet:
         if p.Name == "key":
             portgroupKey = p.Val
     # "Grab the dvswitch uuid and portgroup properties"
     dvswitch_mors = s._retrieve_properties_traversal(
         property_names=['uuid', 'portgroup'],
         from_node=nfmor,
         obj_type='DistributedVirtualSwitch')
     dvswitch_mor = None
     # "Get the appropriate dvswitches managed object"
     for dvswitch in dvswitch_mors:
         if dvswitch_mor:
             break
         for p in dvswitch.PropSet:
             if p.Name == "portgroup":
                 pg_mors = p.Val.ManagedObjectReference
                 for pg_mor in pg_mors:
                     if dvswitch_mor:
                         break
                     key_mor = s._get_object_properties(
                         pg_mor, property_names=['key'])
                     for key in key_mor.PropSet:
                         if key.Val == portgroupKey:
                             dvswitch_mor = dvswitch
     # Get the switches uuid
     dvswitch_uuid = None
     for p in dvswitch_mor.PropSet:
         if p.Name == "uuid":
             dvswitch_uuid = p.Val
     # create_vm_request = VI.CreateVM_TaskRequestMsg()
     # config = create_vm_request.new_config()
     vm_obj = s.get_vm_by_name(vm_name, self.datacenter)
     vm = vm_obj
     net_device_mac = []
     for dev in vm.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ]):
             # print dev.macAddress
             net_device_mac.append(dev.macAddress)
     vm_obj = s.get_vm_by_name(vm_name, self.datacenter)
     # Invoke ReconfigVM_Task
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)  # get the resource pool
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     # add a NIC. the network Name must be set as the device name.
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("add")
     nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
     # nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
     # nic_backing.set_element_deviceName(label)
     nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
         "nic_backing_port").pyclass()
     nic_backing_port.set_element_switchUuid(dvswitch_uuid)
     nic_backing_port.set_element_portgroupKey(portgroupKey)
     nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
         "nic_backing").pyclass()
     nic_backing.set_element_port(nic_backing_port)
     # print inspect.getmembers(nic_backing)
     # nic_backing.deviceName == network
     nic_ctlr.set_element_addressType("generated")
     nic_ctlr.set_element_backing(nic_backing)
     nic_ctlr.set_element_key(4)
     dev_change.set_element_device(nic_ctlr)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # net_device.Connectable.Connected = True
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     vm = self.server.get_vm_by_name(vm_name, self.datacenter)
     for dev in vm.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ] and dev.macAddress not in net_device_mac):
             # print dev.macAddress
             self.vm_vnics[(vm_name, network)] = (dev.deviceInfo.label,
                                                  dev.macAddress)
     if status == task.STATE_SUCCESS:
         logger.info("vnic %s on vm %s successfully added" %
                     (dev.macAddress, vm_name))
     elif status == task.STATE_ERROR:
         logger.info("Error adding vnic on vm %s: %s" %
                     (vm_name, task.get_error_message()))
     return dev.macAddress
Example #20
def createGuest(host_con,guest_dc,guest_host,guest_name,guest_ver,guest_mem,guest_cpu,guest_iso,guest_os,guest_disk_gb,guest_ds,guest_network,guest_enterbios):
    #get dc MOR from list
    dc_list=[k for k,v in host_con.get_datacenters().items() if v==guest_dc]
    if dc_list:
        dc_mor=dc_list[0]
    else:
        host_con.disconnect()
        return "Cannot find dc: "+guest_dc
    dc_props=VIProperty(host_con, dc_mor)
    #get vmFolder
    vmf_mor = dc_props.vmFolder._obj
    #get hostfolder MOR
    hf_mor=dc_props.hostFolder._obj
    #get computer resources MORs
    cr_mors=host_con._retrieve_properties_traversal(property_names=['name','host'],from_node=hf_mor,obj_type='ComputeResource')
    #get host MOR
    try:
        host_mor=[k for k,v in host_con.get_hosts().items() if v==guest_host][0]
    except IndexError:
        host_con.disconnect()
        return "Cannot find host: "+guest_host
    #get computer resource MOR for host
    cr_mor=None
    for cr in cr_mors:
        if cr_mor:
            break
        for p in cr.PropSet:
            if p.Name=="host":
                for h in p.Val.get_element_ManagedObjectReference():
                    if h==host_mor:
                        cr_mor=cr.Obj
                        break
                if cr_mor:
                    break
    cr_props=VIProperty(host_con,cr_mor)
    #get resource pool MOR
    rp_mor=cr_props.resourcePool._obj

    #build guest properties
    #get config target
    request=VI.QueryConfigTargetRequestMsg()
    _this=request.new__this(cr_props.environmentBrowser._obj)
    _this.set_attribute_type(cr_props.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h=request.new_host(host_mor)
    h.set_attribute_type(host_mor.get_attribute_type())
    request.set_element_host(h)
    config_target=host_con._proxy.QueryConfigTarget(request)._returnval
    #get default devices
    request=VI.QueryConfigOptionRequestMsg()
    _this=request.new__this(cr_props.environmentBrowser._obj)
    _this.set_attribute_type(cr_props.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h=request.new_host(host_mor)
    h.set_attribute_type(host_mor.get_attribute_type())
    request.set_element_host(h)
    config_option=host_con._proxy.QueryConfigOption(request)._returnval
    default_devs=config_option.DefaultDevice
    #get network names
    if guest_network:
        net_name=guest_network
    else:
        net_name=None
        for net in config_target.Network:
            if net.Network.Accessible:
                net_name=net.Network.Name
    #get ds
    ds_target = None
    for d in config_target.Datastore:
        if d.Datastore.Accessible and ((guest_ds and d.Datastore.Name==guest_ds) or (not guest_ds)):
            ds_target=d.Datastore.Datastore
            guest_ds=d.Datastore.Name
            break
    if not ds_target:
        host_con.disconnect()
        return "Cannot find datastore: "+guest_ds
    ds_vol_name="[%s]" % guest_ds

    #create task request
    create_vm_request=VI.CreateVM_TaskRequestMsg()
    config=create_vm_request.new_config()
    #set location of vmx
    vm_files=config.new_files()
    vm_files.set_element_vmPathName(ds_vol_name)
    config.set_element_files(vm_files)
    if guest_enterbios:
        #set boot parameters
        vmboot=config.new_bootOptions()
        vmboot.set_element_enterBIOSSetup(True)
        config.set_element_bootOptions(vmboot)
    #set general parameters
    config.set_element_version(guest_ver)
    config.set_element_name(guest_name)
    config.set_element_memoryMB(guest_mem)
    config.set_element_memoryHotAddEnabled(True)
    config.set_element_numCPUs(guest_cpu)
    config.set_element_guestId(guest_os)
    config.set_element_cpuHotAddEnabled(True)

    #create devices
    devices = []
    #add controller to devices
    disk_ctrl_key=1
    scsi_ctrl_spec=config.new_deviceChange()
    scsi_ctrl_spec.set_element_operation('add')
    scsi_ctrl=VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
    scsi_ctrl.set_element_busNumber(0)
    scsi_ctrl.set_element_key(disk_ctrl_key)
    scsi_ctrl.set_element_sharedBus("noSharing")
    scsi_ctrl_spec.set_element_device(scsi_ctrl)
    devices.append(scsi_ctrl_spec)
    #find ide controller
    ide_ctlr = None
    for dev in default_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev
    #add cdrom
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation('add')
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
        cd_device_backing =VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
        ds_ref = cd_device_backing.new_datastore(ds_target)
        ds_ref.set_attribute_type(ds_target.get_attribute_type())
        cd_device_backing.set_element_datastore(ds_ref)
        cd_device_backing.set_element_fileName("%s %s" % (ds_vol_name,guest_iso))
        cd_ctrl.set_element_backing(cd_device_backing)
        cd_ctrl.set_element_key(20)
        cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
        cd_ctrl.set_element_unitNumber(0)
        cd_spec.set_element_device(cd_ctrl)
        devices.append(cd_spec)
    #add disk
    disk_spec=config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr=VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing=VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
    disk_backing.set_element_fileName(ds_vol_name)
    disk_backing.set_element_diskMode("persistent")
    disk_ctlr.set_element_key(0)
    disk_ctlr.set_element_controllerKey(disk_ctrl_key)
    disk_ctlr.set_element_unitNumber(0)
    disk_ctlr.set_element_backing(disk_backing)
    guest_disk_size=guest_disk_gb*1024*1024
    disk_ctlr.set_element_capacityInKB(guest_disk_size)
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)
    #add a network controller
    nic_spec = config.new_deviceChange()
    if net_name:
        nic_spec.set_element_operation("add")
        nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
        nic_backing.set_element_deviceName(net_name)
        nic_ctlr.set_element_addressType("generated")
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        nic_spec.set_element_device(nic_ctlr)
        devices.append(nic_spec)

    #create vm request
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    new_vmf_mor=create_vm_request.new__this(vmf_mor)
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
    new_rp_mor=create_vm_request.new_pool(rp_mor)
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type())
    new_host_mor=create_vm_request.new_host(host_mor)
    new_host_mor.set_attribute_type(host_mor.get_attribute_type())
    create_vm_request.set_element__this(new_vmf_mor)
    create_vm_request.set_element_pool(new_rp_mor)
    create_vm_request.set_element_host(new_host_mor)

    #finally actually create the guest :)
    task_mor=host_con._proxy.CreateVM_Task(create_vm_request)._returnval
    task=VITask(task_mor,host_con)
    task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])

    if task.get_state()==task.STATE_ERROR:
        return "Cannot create guest: "+task.get_error_message()
    else:
        return "Succesfully created guest: "+guest_name
Example #21
# Add a CD-ROM device to the VM.
add_cdrom("iso", cd_iso_location)
add_nic("vmxnet3", network_name, "dvs")
#add_nic("vmxnet3", "VM Network", "standard")

config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)

# CREATE THE VM
taskmor = s._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, s)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
    raise Exception("Error creating vm: %s" % task.get_error_message())

# If there is any extra config options specified, set them here.
if extra_config:
    vm = s.get_vm_by_name(vmname)
    vm.set_extra_config(extra_config)

s.disconnect()
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    folder_mor = create_vm_request.new__this(vmfmor)
    folder_mor.set_attribute_type(vmfmor.get_attribute_type())
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, vsphere_client)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if task.get_state() == task.STATE_ERROR:
        vsphere_client.disconnect()
        module.fail_json(msg="Error creating vm: %s" %
                         task.get_error_message())
    else:
        # We always need to get the vm because we are going to gather facts
        vm = vsphere_client.get_vm_by_name(guest)

        # VM was created. If there is any extra config options specified, set
        # them here , disconnect from vcenter, then exit.
        if vm_extra_config:
            vm.set_extra_config(vm_extra_config)

        # Power on the VM if it was requested