Example #1
    def rename(self, new_name, sync_run=True):
        """
        Renames this managed entity.
          * new_name: Any / (slash) or \ (backslash) character used in this
            name element will be escaped. Similarly, any % (percent) character
            used in this name element will be escaped, unless it is used to
            start an escape sequence. A slash is escaped as %2F or %2f, a
            backslash is escaped as %5C or %5c, and a percent is escaped as %25.
          * sync_run: (default True). If False, does not wait for the task to
            finish and returns an instance of VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Rename_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_newName(new_name)

            task = self._server._proxy.Rename_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task

        except (VI.ZSI.FaultException) as e:
            raise VIApiException(e)
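A minimal usage sketch for the method above. The host, credentials, and VM names are hypothetical, and the rename() call assumes the extended managed-entity class shown here, which not every pysphere release ships:

# Hypothetical usage: rename asynchronously, then poll the returned VITask
# ourselves instead of blocking inside rename().
from pysphere import VIServer

server = VIServer()
server.connect("esx.example.com", "user", "password")  # hypothetical host/creds
vm = server.get_vm_by_name("web-01")                   # hypothetical VM name
task = vm.rename("web-01-renamed", sync_run=False)     # returns a VITask
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_ERROR:
    print "Rename failed:", task.get_error_message()
server.disconnect()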
Example #2
    def destroy(self, sync_run=True):
        """
        Destroys this object, deleting its contents and removing it from its 
        parent folder (if any)
        * sync_run: (default True). If False, does not wait for the task to
            finish and returns an instance of VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            task = self._server._proxy.Destroy_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task
        except (VI.ZSI.FaultException) as e:
            raise VIApiException(e)
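For contrast with the asynchronous sketch above, the synchronous path (the default) blocks until the task finishes and raises VIException on failure. A hedged sketch, with names hypothetical and assuming the same extended entity class:

# Hypothetical usage: synchronous destroy; VIException carries the task error.
from pysphere import VIServer
from pysphere.resources.vi_exception import VIException

server = VIServer()
server.connect("esx.example.com", "user", "password")  # hypothetical host/creds
vm = server.get_vm_by_name("build-slave-7")            # hypothetical VM name
try:
    vm.destroy()  # sync_run=True by default: returns None on success
except VIException as e:
    print "Destroy failed:", e
finally:
    server.disconnect()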
Example #3
def delete_vm(vm):
    vm_name = vm.get_property('name', from_cache=False)
    log("Preparing to delete VM %s..." % vm_name)
    vm_status = vm.get_status()
    log("VM status: %s" % vm_status)
    if vm_status == "POWERED OFF" or vm_status == "POWERING OFF":
        log("VM power state: %s" % vm_status)
    else:
        log("Powering off VM %s..." % vm_name)
        vm.power_off()

    log("Deleting VM %s..." % vm_name)
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    ret = server._proxy.Destroy_Task(request)._returnval

    # Wait for the delete task to finish
    task = VITask(ret, server)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        log("VM successfully deleted from disk.")
    elif status == task.STATE_ERROR:
        error_msg = "Error while deleting VM: " + task.get_error_message()
        raise Exception(error_msg)
Example #4
    def add_existence_vmdk(self, vm_name, path):
        """
        Add an existing hard drive (.vmdk) to the virtual machine
        :param vm_name: virtual machine name
        :param path: hard drive path
        :raise: ExistenceException, CreatorException
        """
        self._connect_to_esx()
        try:
            vm = self.esx_server.get_vm_by_name(vm_name)
        except Exception:
            raise ExistenceException("Couldn't find the virtual machine %s" % vm_name)

        unit_number = -1
        for disk in vm._disks:
            unit_number = max(unit_number, disk["device"]["unitNumber"])
        unit_number += 1

        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)

        spec = request.new_spec()

        dc = spec.new_deviceChange()
        dc.Operation = "add"

        hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
        hd.Key = -100
        hd.UnitNumber = unit_number
        hd.CapacityInKB = 0
        hd.ControllerKey = 1000

        backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass()
        backing.FileName = path
        backing.DiskMode = "persistent"
        backing.ThinProvisioned = False
        hd.Backing = backing

        connectable = hd.new_connectable()
        connectable.StartConnected = True
        connectable.AllowGuestControl = False
        connectable.Connected = True
        hd.Connectable = connectable

        dc.Device = hd

        spec.DeviceChange = [dc]
        request.Spec = spec

        task = self.esx_server._proxy.ReconfigVM_Task(request)._returnval
        vi_task = VITask(task, self.esx_server)

        # Wait for the task to finish
        status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            self._disconnect_from_esx()
            raise CreatorException("ERROR CONFIGURING VM:%s" % vi_task.get_error_message())
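Note that unit number 7 on a SCSI controller is reserved for the controller itself, so the max+1 scheme above can collide with it on a VM that already has seven or more disks. A hypothetical call site for the method, assuming `creator` is an instance of the surrounding class and the .vmdk already exists on the datastore:

# Hypothetical call: attach an existing flat .vmdk using the standard
# "[datastore] folder/disk.vmdk" path syntax.
creator.add_existence_vmdk("db-vm", "[datastore1] db-vm/data-disk.vmdk")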
Example #5
def delete_vm(vsphere_client, module, guest, vm, force):
    try:

        if vm.is_powered_on():
            if force:
                try:
                    vm.power_off(sync_run=True)
                    vm.get_status()

                except Exception as e:
                    module.fail_json(
                        msg='Failed to shutdown vm %s: %s' % (guest, e))
            else:
                module.fail_json(
                    msg='You must either shut the vm down first or '
                        'use force')

        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vsphere_client._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, vsphere_client)

        # Wait for the task to finish
        status = task.wait_for_state(
            [task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_ERROR:
            vsphere_client.disconnect()
            module.fail_json(msg="Error removing vm: %s" %
                             task.get_error_message())
        module.exit_json(changed=True, changes="VM %s deleted" % guest)
Example #6
    def destroy_vm(self, vmname):
        """
        Destroys virtual machine by name
        :param vmname: virtual machine name
        :raise: ExistenceException, CreatorException
        """

        self._connect_to_esx()

        try:
            vm = self.esx_server.get_vm_by_name(vmname)
        except Exception as error:
            self._disconnect_from_esx()
            raise ExistenceException("Couldn't find VM '%s' - %s" % (vmname, error.message))

        try:
            if vm.is_powered_on() or vm.is_powering_off() or vm.is_reverting():
                vm.power_off()
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = self.esx_server._proxy.Destroy_Task(request)._returnval

            # Wait for the task to finish
            task = VITask(ret, self.esx_server)

            status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
            if status != task.STATE_SUCCESS:
                raise CreatorException("Couldn't destroy vm - " + task.get_error_message())
        except Exception:
            self._disconnect_from_esx()
            raise CreatorException("Couldn't destroy the virtual machine %s" % vmname)
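A hypothetical call site, translating the two failure modes documented above (missing VM vs. failed destroy task) into their exception types:

# Hypothetical usage; `creator` is an instance of the surrounding class.
try:
    creator.destroy_vm("stale-build-vm")
except ExistenceException as e:
    print "Nothing to destroy:", e
except CreatorException as e:
    print "Destroy failed:", e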
Example #7
def delete_vm(vsphere_client, module, guest, vm, force):
    try:

        if vm.is_powered_on():
            if force:
                try:
                    vm.power_off(sync_run=True)
                    vm.get_status()

                except Exception as e:
                    module.fail_json(
                        msg='Failed to shutdown vm %s: %s' % (guest, e))
            else:
                module.fail_json(
                    msg='You must either shut the vm down first or '
                        'use force')

        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vsphere_client._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, vsphere_client)

        # Wait for the task to finish
        status = task.wait_for_state(
            [task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_ERROR:
            vsphere_client.disconnect()
            module.fail_json(msg="Error removing vm: %s" %
                             task.get_error_message())
        module.exit_json(changed=True, changes="VM %s deleted" % guest)
Example #8
    def rename(self, new_name, sync_run=True):
        """
        Renames this managed entity.
          * new_name: Any / (slash) or \ (backslash) character used in this
            name element will be escaped. Similarly, any % (percent) character
            used in this name element will be escaped, unless it is used to
            start an escape sequence. A slash is escaped as %2F or %2f, a
            backslash is escaped as %5C or %5c, and a percent is escaped as %25.
          * sync_run: (default True). If False, does not wait for the task to
            finish and returns an instance of VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Rename_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_newName(new_name)

            task = self._server._proxy.Rename_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state(
                    [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task

        except (VI.ZSI.FaultException) as e:
            raise VIApiException(e)
Example #9
    def destroy(self, sync_run=True):
        """
        Destroys this object, deleting its contents and removing it from its 
        parent folder (if any)
        * sync_run: (default True). If False, does not wait for the task to
            finish and returns an instance of VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)

            task = self._server._proxy.Destroy_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state(
                    [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task
        except (VI.ZSI.FaultException) as e:
            raise VIApiException(e)
Example #10
    def delete_instance(self, name=None):
        # Check if name is passed, if not extract from vm_json
        if not name:
            name = self.vm_json.get('name')

        # Initialise vcenter handle
        vcenter_handle = self.server
        try:
            vm = vcenter_handle.get_vm_by_name(name)
        except Exception:
            logger.info('VM %s not present in vCenter. This is OK' % name)
            return
        # Power off if not already
        if not vm.is_powered_off():
            vm.power_off()
        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vcenter_handle._proxy.Destroy_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vcenter_handle)

        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            logger.info('VM %s successfully deleted from disk' % name)
        elif status == task.STATE_ERROR:
            logger.info('Error removing vm: %s' % task.get_error_message())
Example #11
def main():
    opts = options()

    # CONNECTION PARAMETERS
    server = opts.esx_host
    user = opts.user
    password = opts.passwd

    # REQUIRED PARAMETERS
    vmname = opts.name

    # CONNECT TO THE SERVER
    s = VIServer()
    s.connect(server, user, password)

    try:
        vm = s.get_vm_by_name(opts.name)
        vm.shutdown_guest()
        
        count = 1
        wait_for = 60
        try: 
            while count < wait_for and not vm.is_powered_off():
                count += 1
                time.sleep(1)
                print "Elapsed %s seconds ..." % str(count)

        except Exception as e:
            if count >= wait_for:
                print "Failed to shutdown the VM (%s) even after %s seconds." % (vmname, str(wait_for))
                print "Please login to the EXSi server and fix the issue. Exception: %s" % str(e)
                sys.exit(1)

        check_count(count, wait_for)
    except Exception as e:
        print "Failed to locate and shutdown the new VM using:", opts.name
        print "VM could not be deleted."
        print "Exception:", str(e)

    # Invoke Destroy_Task
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    ret = s._proxy.Destroy_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, s)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully deleted from disk"
    elif status == task.STATE_ERROR:
        print "Error removing vm:", task.get_error_message()

    # disconnect from the server
    s.disconnect()
Example #12
    def delete_vm_by_name(self, name, remove_files=True):
        """
        Unregisters a VM by name and removes its files from the datastore.
        @name is the VM name.
        @remove_files - if True (default), also deletes the VM files from the datastore.
        """
        statusLine = ''
        success = False

        if not self.__logged:
            raise VIException("Must call 'connect' before invoking this method", FaultTypes.NOT_CONNECTED)
        try:
            # Get VM
            vm = self.get_vm_by_name(name)

            if remove_files:
                # Invoke Destroy_Task
                request = VI.Destroy_TaskRequestMsg()

                _this = request.new__this(vm._mor)
                _this.set_attribute_type(vm._mor.get_attribute_type())
                request.set_element__this(_this)
                ret = self._proxy.Destroy_Task(request)._returnval
                task = VITask(ret, self)
                
                # Wait for the task to finish
                status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

                if status == task.STATE_SUCCESS:
                    statusLine = "VM successfully unregistered and deleted from datastore"
                    success = True

                elif status == task.STATE_ERROR:
                    statusLine = "Error removing vm: {}".format(task.get_error_message())
                    success = False

            else:
                # Invoke UnregisterVMRequestMsg
                request = VI.UnregisterVMRequestMsg()

                _this = request.new__this(vm._mor)
                _this.set_attribute_type(vm._mor.get_attribute_type())
                request.set_element__this(_this)
                ret = self._proxy.UnregisterVM(request)
                task = VITask(ret, self)

                statusLine = "VM successfully unregistered (files still on datastore)"
                success = True

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)

        return success, statusLine
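A hypothetical call site showing the (success, statusLine) contract of the method above; with remove_files=False the VM is only unregistered and its files stay on the datastore:

# Hypothetical usage on a connected instance of the surrounding server class.
ok, status_line = server.delete_vm_by_name("obsolete-vm", remove_files=False)
print status_line
if not ok:
    raise RuntimeError(status_line)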
Example #13
 def remove_vnic(self, label):
     '''Removes a vnic from a VM on an existing vCenter.
     Arguments:
     label: name of the vnic to remove (the VM is taken from vm_json)
     **Note**
     VNICs must be removed each time this class is instantiated.
     VNICs are stored in a temporary dictionary, so if they are not
     removed at the end of this class instance, they will have to be
     removed manually in the vcenter.
     '''
     vm = self.vm_json.get('name')
     vm_obj = self.server.get_vm_by_name(vm, self.datacenter)
     if not vm_obj:
         raise Exception("VM %s not found" % vm)
     net_device = None
     if (vm, label) not in self.vm_vnics:
         raise Exception("vcenter removeVNIC error: vm_vnics not found: " +
                         str((vm, label)))
     # Find Virtual Nic device
     for dev in vm_obj.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ] and dev.deviceInfo.label == self.vm_vnics[(vm, label)][0]):
             net_device = dev._obj
             break
     if not net_device:
         raise Exception("NIC not found")
     # Reconfigure
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("remove")
     dev_change.set_element_device(net_device)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         logger.info("   removing vnic in %s on vm %s successful " %
                     (label, vm))
     elif status == task.STATE_ERROR:
         raise Exception("Error removing vnic in %s on vm %s msg %s" %
                         (label, vm, task.get_error_message()))
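Given the cleanup caveat in the docstring, add and remove are expected to be paired within one instance of this class. A hedged sketch; the object and helper names are hypothetical:

# Hypothetical pairing: the same label used by add_vnic() keys the
# vm_vnics dictionary that remove_vnic() looks up.
mac = nic_mgr.add_vnic("dvs-pg-blue")    # nic_mgr: instance of this class
try:
    run_traffic_test(mac)                # hypothetical test step
finally:
    nic_mgr.remove_vnic("dvs-pg-blue")   # must run in the same instance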
Example #14
def destroyGuest(host_con, guest_name):
    powerOffGuest(host_con, guest_name)
    try:
        vm = host_con.get_vm_by_name(guest_name)
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = host_con._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, host_con)
        print 'Waiting for VM to be deleted'
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            result = 'Successfully removed guest: %s' % guest_name
        elif status == task.STATE_ERROR:
            result = 'Failed to remove VM: %s\n%s' % (guest_name, task.get_error_message())
    except Exception as e:
        result = 'Failed to remove VM: %s\n%s' % (guest_name, str(e))
    return result
Example #15
def destroyGuest(host_con, guest_name):
    powerOffGuest(host_con, guest_name)
    try:
        vm = host_con.get_vm_by_name(guest_name)
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = host_con._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, host_con)
        print 'Waiting for VM to be deleted'
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            result = 'Successfully removed guest: %s' % guest_name
        elif status == task.STATE_ERROR:
            result = 'Failed to remove VM: %s\n%s' % (guest_name,
                                                      task.get_error_message())
    except Exception as e:
        result = 'Failed to remove VM: %s\n%s' % (guest_name, str(e))
    return result
Example #16
    def handler_revert_to_snapshot(self, task_id, parameters):
        vm_id = parameters['vm_id']
        snapshot_id = parameters['snapshot_id']

        vm_mor = VIMor(vm_id, MORTypes.VirtualMachine)
        snapshot_mor = VIMor(snapshot_id, MORTypes.VirtualMachineSnapshot)

        vm_properties_future = self.application.executor.submit(self.server._get_object_properties, vm_mor, ['name', 'snapshot'])

        request = VI.RevertToSnapshot_TaskRequestMsg()

        mor_snap = request.new__this(snapshot_mor)
        mor_snap.set_attribute_type(snapshot_mor.get_attribute_type())
        request.set_element__this(mor_snap)

        vm_name = None
        snapshot_name = None
        vm_properties = yield vm_properties_future
        for prop in vm_properties.PropSet:
            if prop.Name == 'name':
                vm_name = prop.Val
            elif prop.Name == 'snapshot':
                snapshot_dict = ActionHandler.build_snapshot_dict(prop.Val.RootSnapshotList)
                snapshot_name = snapshot_dict[snapshot_mor].Name

        TaskStatusHandler.update_task(task_id, 'Reverting {0} to {1}...'.format(vm_name, snapshot_name))

        vi_task = self.server._proxy.RevertToSnapshot_Task(request)._returnval

        vi_task = VITask(vi_task, self.server)
        status = yield self.application.executor.submit(
            vi_task.wait_for_state, [vi_task.STATE_SUCCESS,
                                     vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            raise VIException(vi_task.get_error_message(),
                              FaultTypes.TASK_ERROR)

        TaskStatusHandler.update_task(task_id, 'Successfully reverted {0} to {1}'.format(vm_name, snapshot_name))
        TaskStatusHandler.delete_task(task_id)

        self.send_vm_update(vm_id)
Example #17
    def destroy_node(self, node, ex_remove_files=True):
        """
        :param ex_remove_files: Remove all the files from the datastore.
        :type ex_remove_files: ``bool``
        """
        ex_remove_files = False  # note: hardcoded override; the destroy branch below never runs
        vm = self._get_vm_for_node(node=node)

        server = self.connection.client

        # Based on code from
        # https://pypi.python.org/pypi/pyxenter
        if ex_remove_files:
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)

            # pylint: disable=no-member
            ret = server._proxy.Destroy_Task(request)._returnval
            # pylint: enable=no-member
            task = VITask(ret, server)

            # Wait for the task to finish
            status = task.wait_for_state(
                [task.STATE_SUCCESS, task.STATE_ERROR])

            if status == task.STATE_ERROR:
                raise LibcloudError('Error destroying node: %s' %
                                    (task.get_error_message()))
        else:
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

        return True
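A hypothetical caller's view of the method above: with the hardcoded override noted in the comment, the destroy branch is dead code and every call just unregisters the VM, leaving its files on the datastore:

# Hypothetical usage; assumes a connected libcloud vSphere driver instance.
node = driver.list_nodes()[0]
driver.destroy_node(node)   # unregisters the VM; files stay on the datastore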
Example #18
 def __execute_net_device_reconfig_task(self, vm_obj, net_device):
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("edit")
     dev_change.set_element_device(net_device)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         logger.info("Net device reconfig task on vm %s successful " %
                     (vm_obj.properties.name))
     elif status == task.STATE_ERROR:
         raise Exception("Error: Net device reconfig task on vm %s msg %s" %
                         (vm_obj.properties.name, task.get_error_message()))
Example #19
    def destroy_node(self, node, ex_remove_files=True):
        """
        :param ex_remove_files: Remove all the files from the datastore.
        :type ex_remove_files: ``bool``
        """
        ex_remove_files = False  # note: hardcoded override; the destroy branch below never runs
        vm = self._get_vm_for_node(node=node)

        server = self.connection.client

        # Based on code from
        # https://pypi.python.org/pypi/pyxenter
        if ex_remove_files:
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.Destroy_Task(request)._returnval
            task = VITask(ret, server)

            # Wait for the task to finish
            status = task.wait_for_state([task.STATE_SUCCESS,
                                          task.STATE_ERROR])

            if status == task.STATE_ERROR:
                raise LibcloudError('Error destroying node: %s' %
                                    (task.get_error_message()))
        else:
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

        return True
Example #20
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, vsphere_client)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if task.get_state() == task.STATE_ERROR:
        vsphere_client.disconnect()
        module.fail_json(msg="Error creating vm: %s" %
                         task.get_error_message())
    else:
        # We always need to get the vm because we are going to gather facts
        vm = vsphere_client.get_vm_by_name(guest)

        # VM was created. If there is any extra config options specified, set
        # them here , disconnect from vcenter, then exit.
        if vm_extra_config:
            vm.set_extra_config(vm_extra_config)

        # Power on the VM if it was requested
        power_state(vm, state, True)

        vsphere_client.disconnect()
        module.exit_json(
            ansible_facts=gather_facts(vm),
Example #21
# Add a CD-ROM device to the VM.
add_cdrom("iso", cd_iso_location)
add_nic("vmxnet3", network_name, "dvs")
#add_nic("vmxnet3", "VM Network", "standard")

config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)

# CREATE THE VM
taskmor = s._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, s)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
    raise Exception("Error creating vm: %s" % task.get_error_message())

# If there is any extra config options specified, set them here.
if extra_config:
    vm = s.get_vm_by_name(vmname)
    vm.set_extra_config(extra_config)

s.disconnect()
Example #22
def createGuest(host_con,guest_dc,guest_host,guest_name,guest_ver,guest_mem,guest_cpu,guest_iso,guest_os,guest_disk_gb,guest_ds,guest_network,guest_enterbios):
    #get dc MOR from list
    dc_list=[k for k,v in host_con.get_datacenters().items() if v==guest_dc]
    if dc_list:
        dc_mor=dc_list[0]
    else:
        host_con.disconnect()
        return "Cannot find dc: "+guest_dc
    dc_props=VIProperty(host_con, dc_mor)
    #get vmFolder
    vmf_mor = dc_props.vmFolder._obj
    #get hostfolder MOR
    hf_mor=dc_props.hostFolder._obj
    #get computer resources MORs
    cr_mors=host_con._retrieve_properties_traversal(property_names=['name','host'],from_node=hf_mor,obj_type='ComputeResource')
    #get host MOR
    try:
        host_mor=[k for k,v in host_con.get_hosts().items() if v==guest_host][0]
    except IndexError:
        host_con.disconnect()
        return "Cannot find host: "+guest_host
    #get computer resource MOR for host
    cr_mor=None
    for cr in cr_mors:
        if cr_mor:
            break
        for p in cr.PropSet:
            if p.Name=="host":
                for h in p.Val.get_element_ManagedObjectReference():
                    if h==host_mor:
                         cr_mor=cr.Obj
                         break
                if cr_mor:
                    break
    cr_props=VIProperty(host_con,cr_mor)
    #get resource pool MOR
    rp_mor=cr_props.resourcePool._obj

    #build guest properties
    #get config target
    request=VI.QueryConfigTargetRequestMsg()
    _this=request.new__this(cr_props.environmentBrowser._obj)
    _this.set_attribute_type(cr_props.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h=request.new_host(host_mor)
    h.set_attribute_type(host_mor.get_attribute_type())
    request.set_element_host(h)
    config_target=host_con._proxy.QueryConfigTarget(request)._returnval
    #get default devices
    request=VI.QueryConfigOptionRequestMsg()
    _this=request.new__this(cr_props.environmentBrowser._obj)
    _this.set_attribute_type(cr_props.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h=request.new_host(host_mor)
    h.set_attribute_type(host_mor.get_attribute_type())
    request.set_element_host(h)
    config_option=host_con._proxy.QueryConfigOption(request)._returnval
    default_devs=config_option.DefaultDevice
    #get network names
    if guest_network:
        net_name=guest_network
    else:
        for net in config_target.Network:
            if net.Network.Accessible:
                net_name = net.Network.Name
    #get ds
    ds_target = None
    for d in config_target.Datastore:
        if d.Datastore.Accessible and ((guest_ds and d.Datastore.Name==guest_ds) or (not guest_ds)):
            ds_target=d.Datastore.Datastore
            guest_ds=d.Datastore.Name
            break
    if not ds_target:
        host_con.disconnect()
        return "Cannot find datastore: "+guest_ds
    ds_vol_name="[%s]" % guest_ds

    #create task request
    create_vm_request=VI.CreateVM_TaskRequestMsg()
    config=create_vm_request.new_config()
    #set location of vmx
    vm_files=config.new_files()
    vm_files.set_element_vmPathName(ds_vol_name)
    config.set_element_files(vm_files)
    if guest_enterbios:
        #set boot parameters
        vmboot=config.new_bootOptions()
        vmboot.set_element_enterBIOSSetup(True)
        config.set_element_bootOptions(vmboot)
    #set general parameters
    config.set_element_version(guest_ver)
    config.set_element_name(guest_name)
    config.set_element_memoryMB(guest_mem)
    config.set_element_memoryHotAddEnabled(True)
    config.set_element_numCPUs(guest_cpu)
    config.set_element_guestId(guest_os)
    config.set_element_cpuHotAddEnabled(True)

    #create devices
    devices = []
    #add controller to devices
    disk_ctrl_key=1
    scsi_ctrl_spec=config.new_deviceChange()
    scsi_ctrl_spec.set_element_operation('add')
    scsi_ctrl=VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
    scsi_ctrl.set_element_busNumber(0)
    scsi_ctrl.set_element_key(disk_ctrl_key)
    scsi_ctrl.set_element_sharedBus("noSharing")
    scsi_ctrl_spec.set_element_device(scsi_ctrl)
    devices.append(scsi_ctrl_spec)
    #find ide controller
    ide_ctlr = None
    for dev in default_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev
    #add cdrom
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation('add')
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
        cd_device_backing =VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
        ds_ref = cd_device_backing.new_datastore(ds_target)
        ds_ref.set_attribute_type(ds_target.get_attribute_type())
        cd_device_backing.set_element_datastore(ds_ref)
        cd_device_backing.set_element_fileName("%s %s" % (ds_vol_name,guest_iso))
        cd_ctrl.set_element_backing(cd_device_backing)
        cd_ctrl.set_element_key(20)
        cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
        cd_ctrl.set_element_unitNumber(0)
        cd_spec.set_element_device(cd_ctrl)
        devices.append(cd_spec)
    #add disk
    disk_spec=config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr=VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing=VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
    disk_backing.set_element_fileName(ds_vol_name)
    disk_backing.set_element_diskMode("persistent")
    disk_ctlr.set_element_key(0)
    disk_ctlr.set_element_controllerKey(disk_ctrl_key)
    disk_ctlr.set_element_unitNumber(0)
    disk_ctlr.set_element_backing(disk_backing)
    guest_disk_size=guest_disk_gb*1024*1024
    disk_ctlr.set_element_capacityInKB(guest_disk_size)
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)
    #add a network controller
    nic_spec = config.new_deviceChange()
    if net_name:
        nic_spec.set_element_operation("add")
        nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
        nic_backing.set_element_deviceName(net_name)
        nic_ctlr.set_element_addressType("generated")
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        nic_spec.set_element_device(nic_ctlr)
        devices.append(nic_spec)

    #create vm request
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    new_vmf_mor=create_vm_request.new__this(vmf_mor)
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
    new_rp_mor=create_vm_request.new_pool(rp_mor)
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type())
    new_host_mor=create_vm_request.new_host(host_mor)
    new_host_mor.set_attribute_type(host_mor.get_attribute_type())
    create_vm_request.set_element__this(new_vmf_mor)
    create_vm_request.set_element_pool(new_rp_mor)
    create_vm_request.set_element_host(new_host_mor)

    #finally actually create the guest :)
    task_mor=host_con._proxy.CreateVM_Task(create_vm_request)._returnval
    task=VITask(task_mor,host_con)
    task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])

    if task.get_state()==task.STATE_ERROR:
        return "Cannot create guest: "+task.get_error_message()
    else:
        return "Succesfully created guest: "+guest_name
Example #23
def change_dvs_net(s, vm_obj, hostname, dstlabel, curlabel):
    """Takes a VIServer and VIVirtualMachine object and reconfigures
    dVS portgroups according to the mappings in the pg_map dict. The
    pg_map dict must contain the source portgroup as key and the
    destination portgroup as value"""

    # Find virtual NIC devices
    pg_map = {}

    net_device = []
    if vm_obj:
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type in [
                    "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                    "VirtualVmxnet", "VirtualVmxnet2", "VirtualVmxnet3"
            ]:
                net_device.append(dev)
    if len(net_device) == 0:
        raise Exception("The vm seems to lack a Virtual Nic")

    # Lets get the information for the port group
    network_name = dstlabel
    network_name2 = curlabel

    for ds_mor, name in s.get_datacenters().items():
        dcprops = VIProperty(s, ds_mor)
        break

    # networkFolder managed object reference
    nfmor = dcprops.networkFolder._obj
    dvpg_mors = s._retrieve_properties_traversal(
        property_names=['name', 'key'],
        from_node=nfmor,
        obj_type='DistributedVirtualPortgroup')

    # Get the portgroup managed object.
    dvpg_mor = None
    for dvpg in dvpg_mors:
        if dvpg_mor:
            break
        for p in dvpg.PropSet:
            if p.Name == "name" and p.Val == network_name:
                dvpg_mor = dvpg
            if dvpg_mor:
                break

    # Get the portgroup managed object.
    dvpg_mor2 = None
    for dvpg2 in dvpg_mors:
        if dvpg_mor2:
            break
        for p in dvpg2.PropSet:
            if p.Name == "name" and p.Val == network_name2:
                dvpg_mor2 = dvpg2
            if dvpg_mor2:
                break

    if dvpg_mor is None:
        print "Didn't find the dvpg %s, exiting now" % (network_name)
        exit()

    if dvpg_mor2 is None:
        print "Didn't find the dvpg %s, exiting now" % (network_name2)
        exit()

    # Get the portgroup key
    portgroupKey = None
    for p in dvpg_mor.PropSet:
        if p.Name == "key":
            portgroupKey = p.Val
    portgroupKey2 = None
    for p in dvpg_mor2.PropSet:
        if p.Name == "key":
            portgroupKey2 = p.Val

    # Use pg_map to set the new Portgroups
    pg_map[portgroupKey2] = portgroupKey
    for dev in net_device:
        old_portgroup = dev.backing.port.portgroupKey
        if old_portgroup in pg_map:
            dev.backing.port._obj.set_element_portgroupKey(
                pg_map[old_portgroup])
            dev.backing.port._obj.set_element_portKey('')

    # Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)

    # Build a list of device change spec objects
    spec = request.new_spec()
    devs_changed = []
    for dev in net_device:
        dev_change = spec.new_deviceChange()
        dev_change.set_element_device(dev._obj)
        dev_change.set_element_operation("edit")
        devs_changed.append(dev_change)

    # Submit the device change list
    spec.set_element_deviceChange(devs_changed)
    request.set_element_spec(spec)
    ret = s._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, s)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "SUCCESS --> VM '%s' successfully reconfigured!" % hostname
    elif status == task.STATE_ERROR:
        print "ERROR --> Something went wrong reconfiguring vm '%s'!" % hostname, task.get_error_message(
        )
    else:
        print "ERROR --> VM '%s' not found!" % hostname
Example #24
def create_vm():
    opts = options()

    # CONNECTION PARAMETERS
    server = opts.esx_host
    user = opts.user
    password = opts.passwd

    # REQUIRED PARAMETERS
    vmname = opts.name
    # datacentername = "ha-datacenter"
    datacentername = opts.datacenter
    hostname = opts.hostname
    annotation = "My Product Product Virtual Machine"
    memorysize = opts.ram
    cpucount = opts.cpus
    # cd_iso_location =
    # "iso/My_Product_2013_02_26_05_15_00.iso"
    # # located in the ESX datastore
    cd_iso_location = opts.iso
    guestosid = "centos64Guest"
    # find your os in
    # http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
    disksize = (1024 ** 2) * 100  # in KB: 1024 ** 2 KB = 1 GB, so this is 100 GB

    # OPTIONAL PARAMETERS

    datastorename = opts.datastore  # if None, will use the first datastore available

    # CONNECT TO THE SERVER
    s = VIServer()
    s.connect(server, user, password)

    # GET INITIAL PROPERTIES AND OBJECTS

     # get datacenter
    dcmor = s._get_datacenters()[datacentername]
    dcprops = VIProperty(s, dcmor)
     # get host folder
    hfmor = dcprops.hostFolder._obj

     # get computer resources
    crmors = s._retrieve_properties_traversal(property_names=['name',
                                                              'host'], from_node=hfmor, obj_type='ComputeResource')

     # get host
    for hosts in s.get_hosts().items():
        try:
            if hosts.index(hostname) == 1:
                hostmor = hosts[0]
        except ValueError:
            pass

    # get computer resource of this host
    crmor = None
    for cr in crmors:
        if crmor:
            break
        for p in cr.PropSet:
            # print 'p.Name:', p.Name
            if p.Name == "host":
                for h in p.Val.get_element_ManagedObjectReference():
                    if h == hostmor:
                        crmor = cr.Obj
                        break
                if crmor:
                    break
    crprops = VIProperty(s, crmor)

     # get resource pool
    rpmor = crprops.resourcePool._obj

     # get vmFolder
    vmfmor = dcprops.vmFolder._obj

    # CREATE VM CONFIGURATION

     # get config target
    request = VI.QueryConfigTargetRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_target = s._proxy.QueryConfigTarget(request)._returnval

     # get default devices
    request = VI.QueryConfigOptionRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_option = s._proxy.QueryConfigOption(request)._returnval
    default_devs = config_option.DefaultDevice

    # get network name
    # would be assigned to the last known working network interface.
    # in this case, it would be VM Network 2.
    network_name = None
    for n in config_target.Network:
        if n.Network.Accessible:
            network_name = n.Network.Name

    # can hard-code it as 'VM Network'

    # get datastore
    # Just verifies that the datastorename mentioned at the top matches with the
    # available list of datastores.
    ds = None
    for d in config_target.Datastore:
        if d.Datastore.Accessible and ((datastorename and d.Datastore.Name
                                        == datastorename) or (not datastorename)):
            ds = d.Datastore.Datastore
            datastorename = d.Datastore.Name
            break
    if not ds:
        raise Exception("couldn't find datastore")
    volume_name = "[%s]" % datastorename

     # add parameters to the create vm task
    create_vm_request = VI.CreateVM_TaskRequestMsg()
    config = create_vm_request.new_config()
    vmfiles = config.new_files()
    vmfiles.set_element_vmPathName(volume_name)
    config.set_element_files(vmfiles)
    config.set_element_name(vmname)
    config.set_element_annotation(annotation)
    config.set_element_memoryMB(memorysize)
    config.set_element_numCPUs(cpucount)
    config.set_element_guestId(guestosid)
    devices = []

     # add a scsi controller
    disk_ctrl_key = 1
    scsi_ctrl_spec = config.new_deviceChange()
    scsi_ctrl_spec.set_element_operation('add')
    scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
    scsi_ctrl.set_element_busNumber(0)
    scsi_ctrl.set_element_key(disk_ctrl_key)
    scsi_ctrl.set_element_sharedBus("noSharing")

    scsi_ctrl_spec.set_element_device(scsi_ctrl)
    devices.append(scsi_ctrl_spec)

     # find ide controller
    ide_ctlr = None
    for dev in default_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev

     # add a cdrom based on a physical device
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation('add')
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
        cd_device_backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
        ds_ref = cd_device_backing.new_datastore(ds)
        ds_ref.set_attribute_type(ds.get_attribute_type())
        cd_device_backing.set_element_datastore(ds_ref)
        cd_device_backing.set_element_fileName("%s %s" % (volume_name,
                                                          cd_iso_location))
        cd_ctrl.set_element_backing(cd_device_backing)
        cd_ctrl.set_element_key(20)
        cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
        cd_ctrl.set_element_unitNumber(0)
        cd_spec.set_element_device(cd_ctrl)
        devices.append(cd_spec)

     # create a new disk - file based - for the vm
    disk_spec = config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
    disk_backing.set_element_fileName(volume_name)
    disk_backing.set_element_diskMode("persistent")
    disk_backing.ThinProvisioned = True
    disk_ctlr.set_element_key(0)
    disk_ctlr.set_element_controllerKey(disk_ctrl_key)
    disk_ctlr.set_element_unitNumber(0)
    disk_ctlr.set_element_backing(disk_backing)
    disk_ctlr.set_element_capacityInKB(disksize)
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)

     # add a NIC. the network Name must be set as the device name to create the NIC.
    nic_spec = config.new_deviceChange()
    if network_name:
        nic_spec.set_element_operation("add")
        nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
        nic_backing.set_element_deviceName(network_name)
        nic_ctlr.set_element_addressType("generated")
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        nic_spec.set_element_device(nic_ctlr)
        devices.append(nic_spec)

    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    folder_mor = create_vm_request.new__this(vmfmor)
    folder_mor.set_attribute_type(vmfmor.get_attribute_type())
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = s._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, s)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

    if task.get_state() == task.STATE_ERROR:
        raise Exception("Error creating vm: %s" %
                        task.get_error_message())

    # Here you should power your VM (refer to the pysphere documentation)
    # So it boots from the specified ISO location
    try:
        new_vm = s.get_vm_by_name(opts.name)
        connect_vm_cdroms(new_vm, s)
        try:
            new_vm.power_on()
        except Exception as e:
            print "Failed to power-on the new VM using:", opts.name
            print "Exception:", str(e)
    except Exception as e:
        print "Failed to locate the new VM using:", opts.name
        print "Exception:", str(e)
    # disconnect from the server
    s.disconnect()
Example #25
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
    new_rp_mor=create_vm_request.new_pool(rp_mor) 
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type()) 
    new_host_mor=create_vm_request.new_host(host_mor) 
    new_host_mor.set_attribute_type(host_mor.get_attribute_type()) 
    create_vm_request.set_element__this(new_vmf_mor) 
    create_vm_request.set_element_pool(new_rp_mor) 
    create_vm_request.set_element_host(new_host_mor) 
    
    #finally actually create the guest :)
    task_mor=host_con._proxy.CreateVM_Task(create_vm_request)._returnval 
    task=VITask(task_mor,host_con) 
    task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR]) 
    
    if task.get_state()==task.STATE_ERROR: 
        return "Cannot create guest: "+task.get_error_message()
    else:
        return "Succesfully created guest: "+guest_name

def getMac(host_con,guest_name):
    vm=host_con.get_vm_by_name(guest_name)
    net = vm.get_property('net', from_cache=False)
    if net:
        for interface in net:
            mac = interface.get('mac_address', None)
            if mac:
                return mac

    #for v in vm.get_property("devices").values():
    #    if v.get('macAddress'):
    #        return v.get('macAddress')
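A hypothetical use of getMac right after guest creation; it returns the first MAC address found, or None if no NIC has one yet:

# Hypothetical usage; host_con is a connected VIServer.
mac = getMac(host_con, "new-guest-01")
if mac is None:
    print "No MAC yet; the VM may not have finished powering on"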
Example #26
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
    new_rp_mor = create_vm_request.new_pool(rp_mor)
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type())
    new_host_mor = create_vm_request.new_host(host_mor)
    new_host_mor.set_attribute_type(host_mor.get_attribute_type())
    create_vm_request.set_element__this(new_vmf_mor)
    create_vm_request.set_element_pool(new_rp_mor)
    create_vm_request.set_element_host(new_host_mor)

    #finally actually create the guest :)
    task_mor = host_con._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(task_mor, host_con)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

    if task.get_state() == task.STATE_ERROR:
        return "Cannot create guest: " + task.get_error_message()
    else:
        return "Succesfully created guest: " + guest_name


def getMac(host_con, guest_name):
    vm = host_con.get_vm_by_name(guest_name)
    net = vm.get_property('net', from_cache=False)
    if net:
        for interface in net:
            mac = interface.get('mac_address', None)
            if mac:
                return mac

    #for v in vm.get_property("devices").values():
    #    if v.get('macAddress'):
Example #27
 def add_vnic(self, network):
     '''
      add_vnic adds a vnic to the VM named in vm_json.
      Arguments:
      network: the name of the network the new vnic attaches to.
      **Note**
      VNICs must be removed each time this class is instantiated.
      VNICs are stored in a temporary dictionary, so if they are not
      removed at the end of this class instance, they will have to be
      removed manually in the vcenter.
      '''
     vm_name = self.vm_json.get('name')
     # datacentername = "Dev_Datacenter"
     # hostname = "esx14.mgt.hawaii.edu"
     network_name = network
     s = self.server
     dcmor = None
     datacenters = s.get_datacenters()
      logger.info('datacenters %s' % datacenters)
     # "GET INITIAL PROPERTIES AND OBJECTS"
     for i in datacenters:
         vm_list = s.get_registered_vms(i)  # list of vms
         for k in vm_list:
             if vm_name in k:
                 vm = k
                 dcmor = i
                 break
     if dcmor is None:
         logger.info("Datacenters: %s " % (datacenters))
         for k, v in datacenters.items():
             # When a VM is renamed, pysphere query to vcenter does not seem
             # to find the new vm name in any data center. So when we don't
             # find a vm in any data center, we assume it belongs in
             # Sunnyvale datacenter.
             # TODO(bindu): add an option to testbed file to pass this as
             # an option.
             if v == 'Sunnyvale':
                 logger.info('Datacenter for Sunnyvale %s' % k)
                 logger.info('Failed to find VM %s in any data center' %
                             vm_name)
                 logger.info(
                     '   VM might have been renamed. Assume datacenter Sunnyvale'
                 )
                 dcmor = k
                 break
         # raise Exception("Failed to find VM %s in any data center" % (vm_name))
     dcprops = VIProperty(s, dcmor)
     # "networkFolder managed object reference"
     nfmor = dcprops.networkFolder._obj
     dvpg_mors = s._retrieve_properties_traversal(
         property_names=['name', 'key'],
         from_node=nfmor,
         obj_type='DistributedVirtualPortgroup')
     # "get the portgroup managed object."
     dvpg_mor = None
     for dvpg in dvpg_mors:
         if dvpg_mor:
             break
         for p in dvpg.PropSet:
             if p.Name == "name" and p.Val == network_name:
                 dvpg_mor = dvpg
             if dvpg_mor:
                 break
     if dvpg_mor is None:
         raise Exception("Didn't find the dvpg %s, exiting now" %
                         (network_name))
     # "Get the portgroup key"
     portgroupKey = None
     for p in dvpg_mor.PropSet:
         if p.Name == "key":
             portgroupKey = p.Val
     # "Grab the dvswitch uuid and portgroup properties"
     dvswitch_mors = s._retrieve_properties_traversal(
         property_names=['uuid', 'portgroup'],
         from_node=nfmor,
         obj_type='DistributedVirtualSwitch')
     dvswitch_mor = None
     # "Get the appropriate dvswitches managed object"
     for dvswitch in dvswitch_mors:
         if dvswitch_mor:
             break
         for p in dvswitch.PropSet:
             if p.Name == "portgroup":
                 pg_mors = p.Val.ManagedObjectReference
                 for pg_mor in pg_mors:
                     if dvswitch_mor:
                         break
                     key_mor = s._get_object_properties(
                         pg_mor, property_names=['key'])
                     for key in key_mor.PropSet:
                         if key.Val == portgroupKey:
                             dvswitch_mor = dvswitch
     # Get the switches uuid
     dvswitch_uuid = None
     for p in dvswitch_mor.PropSet:
         if p.Name == "uuid":
             dvswitch_uuid = p.Val
     # create_vm_request = VI.CreateVM_TaskRequestMsg()
     # config = create_vm_request.new_config()
     vm_obj = s.get_vm_by_name(vm_name, self.datacenter)
     vm = vm_obj
     net_device_mac = []
     for dev in vm.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ]):
             # print dev.macAddress
             net_device_mac.append(dev.macAddress)
     vm_obj = s.get_vm_by_name(vm_name, self.datacenter)
     # Invoke ReconfigVM_Task
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)  # get the resource pool
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     # add a NIC. the network Name must be set as the device name.
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("add")
     nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
     # nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
     # nic_backing.set_element_deviceName(label)
     nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
         "nic_backing_port").pyclass()
     nic_backing_port.set_element_switchUuid(dvswitch_uuid)
     nic_backing_port.set_element_portgroupKey(portgroupKey)
     nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
         "nic_backing").pyclass()
     nic_backing.set_element_port(nic_backing_port)
     # print inspect.getmembers(nic_backing)
     # nic_backing.deviceName == network
     nic_ctlr.set_element_addressType("generated")
     nic_ctlr.set_element_backing(nic_backing)
     nic_ctlr.set_element_key(4)
     dev_change.set_element_device(nic_ctlr)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # net_device.Connectable.Connected = True
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     vm = self.server.get_vm_by_name(vm_name, self.datacenter)
     for dev in vm.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ] and dev.macAddress not in net_device_mac):
             # print dev.macAddress
             self.vm_vnics[(vm_name, network)] = (dev.deviceInfo.label,
                                                  dev.macAddress)
     if status == task.STATE_SUCCESS:
         logger.info("vnic %s on vm %s successfully added" %
                     (dev.macAddress, vm_name))
     elif status == task.STATE_ERROR:
         logger.info("Error adding vm: %s" % vm_name,
                     task.get_error_message())
     return dev.macAddress
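
The walk above — resolve the portgroup key by name, then find the owning switch's uuid — is the reusable core of attaching a NIC to a distributed portgroup. Below is a minimal sketch that distills it into a standalone helper; it assumes a connected pysphere server object exposing the same private traversal helpers used above (_retrieve_properties_traversal, _get_object_properties), and the find_dvs_connection name is ours, not pysphere's.

from pysphere import VIProperty  # assumed top-level export; adjust to your pysphere version

def find_dvs_connection(server, dcmor, network_name):
    """Return (dvswitch_uuid, portgroup_key) for a dvPortgroup name, or (None, None)."""
    dcprops = VIProperty(server, dcmor)
    nfmor = dcprops.networkFolder._obj
    # Resolve the portgroup key from its name.
    portgroup_key = None
    dvpg_mors = server._retrieve_properties_traversal(
        property_names=['name', 'key'], from_node=nfmor,
        obj_type='DistributedVirtualPortgroup')
    for dvpg in dvpg_mors:
        props = dict((p.Name, p.Val) for p in dvpg.PropSet)
        if props.get('name') == network_name:
            portgroup_key = props['key']
            break
    if portgroup_key is None:
        return None, None
    # Find the dvswitch that owns the portgroup and return its uuid.
    dvswitch_mors = server._retrieve_properties_traversal(
        property_names=['uuid', 'portgroup'], from_node=nfmor,
        obj_type='DistributedVirtualSwitch')
    for dvswitch in dvswitch_mors:
        props = dict((p.Name, p.Val) for p in dvswitch.PropSet)
        for pg_mor in props['portgroup'].ManagedObjectReference:
            key_props = server._get_object_properties(pg_mor, property_names=['key'])
            for key in key_props.PropSet:
                if key.Val == portgroup_key:
                    return props['uuid'], portgroup_key
    return None, portgroup_key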
Example #28
    def create_vm(self, vm_options):
        """
        Creates a virtual machine on an ESXi server.
        :param vm_options: dict containing parameters for the VM:
        'vm_name'
        'iso'
        'datacenter_name'
        'datastore_name'
        'resource_pool_name'
        'networks'
        'description'
        'esx_hostname'
        'hard_drive'
        'guestosid'
        'memory_size'
        'cpu_count'
        'disk_size'
        See create_vm_old for details
        :raise: CreatorException, ExistenceException
        """
        self._connect_to_esx()

        # VM NAME
        vm_name = None
        try:
            vm_name = str(vm_options["vm_name"])
            vm_temp = self.esx_server.get_vm_by_name(vm_name)
            if vm_temp:
                raise ExistenceException('VM "%s" already exists' % vm_name)
        except KeyError:
            raise CreatorException("Must specify VM name")
        except pysphere.VIException as inst:
            if "[Object Not Found]" in str(inst):
                pass

        # HOSTNAME
        hosts = self.esx_server.get_hosts()
        try:
            esx_hostname = vm_options["esx_hostname"]
            if not esx_hostname:
                raise KeyError
            elif not (esx_hostname in hosts.values()):
                raise CreatorException("Couldn't find host '%s'" % esx_hostname)
        except KeyError:
            if len(hosts.values()) > 1:
                raise CreatorException("More than 1 host - must specify ESX Hostname")
            elif not hosts.values():
                raise CreatorException("Couldn't find available host")
            esx_hostname = hosts.values()[0]
        # MOR and PROPERTIES
        hostmor = [k for k, v in hosts.items() if v == esx_hostname][0]
        hostprop = VIProperty(self.esx_server, hostmor)

        # DATACENTER - FIX EXCEPTION
        # todo: fix self.esx_server.get_datacenters().values()
        dcs = self.esx_server.get_datacenters()
        dc_values = dcs.values()
        try:
            dc_name = vm_options["datacenter_name"]
            if dc_name not in dc_values:
                raise CreatorException("Couldn't find datacenter '%s'" % dc_name)
        except KeyError:
            if len(dc_values) > 1:
                raise CreatorException("More than 1 datacenter - must specify ESX Hostname")
            elif not dc_values:
                raise CreatorException("Couldn't find available datacenter")
            dc_name = dc_values[0]
        # MOR and PROPERTIES
        dcmor = [k for k, v in dcs.items() if v == dc_name][0]
        dcprops = VIProperty(self.esx_server, dcmor)

        # DATASTORE
        datastores = hostprop.datastore
        try:
            ds_name = vm_options["datastore_name"]
            ds_list = []
            for ds in datastores:
                ds_list.append(ds.name)
            if ds_name not in ds_list:
                raise CreatorException("Couldn't find datastore or datastore is not available")
        except KeyError:
            if len(datastores) > 1:
                raise CreatorException("More than 1 datastore on ESX - must specify datastore name")
            elif not datastores:
                raise CreatorException("Couldn't find available datastore")
            ds_name = datastores[0].name

        # RESOURCE POOL
        resource_pool_name = ""
        try:
            resource_pool_name = vm_options["resource_pool_name"]
            if resource_pool_name == "/":
                pass
            elif resource_pool_name[0] != "/":
                resource_pool_name = "/{0}".format(resource_pool_name)
        except KeyError:
            resource_pool_name = "/"
        finally:
            rpmor = self._fetch_resource_pool(resource_pool_name, esx_hostname)
            if not rpmor:
                raise CreatorException("Couldn't find resource pool '%s'" % resource_pool_name)

        # NETWORKS
        try:
            networks = list(vm_options["networks"])
        except Exception:
            networks = []

        try:
            iso = vm_options["iso"]
            if iso == False:
                iso = None
            else:
                # todo: hide magic
                iso = iso[iso.find(ds_name) + len(ds_name) + 1 :]
        except KeyError:
            iso = None

        # Description
        try:
            description = vm_options["description"]
        except KeyError:
            description = "Description for VM %s" % vm_name

        try:
            guestosid = vm_options["guestosid"]
        except KeyError:
            guestosid = "otherGuest"

        try:
            memory_size = int(vm_options["memory_size"])
            if memory_size <= 0:
                raise CreatorException("Disk size must be greater than 0")
        except Exception:
            memory_size = DEFAULT_MEMORY_SIZE  # MB

        try:
            cpu_count = int(vm_options["cpu_count"])
        except Exception:
            cpu_count = DEFAULT_CPU_COUNT

        try:
            disk_size = int(vm_options["disk_size"])
            if disk_size < 0:
                raise CreatorException("Disk size must be greater than 0")
        except Exception:
            disk_size = DEFAULT_DISK_SIZE  # KB

        crprops = self._fetch_computer_resource(dcprops, hostmor)
        vmfmor = dcprops.vmFolder._obj

        # CREATE VM CONFIGURATION
        # get config target
        request = VI.QueryConfigTargetRequestMsg()
        _this = request.new__this(crprops.environmentBrowser._obj)
        _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
        request.set_element__this(_this)
        h = request.new_host(hostmor)
        h.set_attribute_type(hostmor.get_attribute_type())
        request.set_element_host(h)
        config_target = self.esx_server._proxy.QueryConfigTarget(request)._returnval

        # get default devices
        request = VI.QueryConfigOptionRequestMsg()
        _this = request.new__this(crprops.environmentBrowser._obj)
        _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
        request.set_element__this(_this)
        h = request.new_host(hostmor)
        h.set_attribute_type(hostmor.get_attribute_type())
        request.set_element_host(h)
        config_option = self.esx_server._proxy.QueryConfigOption(request)._returnval
        default_devs = config_option.DefaultDevice

        # get network name
        available_networks = 0
        for net in networks:
            for n in config_target.Network:
                if n.Network.Accessible and n.Network.Name == net:
                    available_networks += 1
                    break
        if len(networks) != available_networks:
            msg = ""
            for n in config_target.Network:
                msg = "%s %s" % (msg, str(n.Network.Name))
            raise CreatorException("Couldn't find all networks; found: %s" % msg)
            # raise ExistenceException("Couldn't find network")

        # get datastore
        ds = None
        for d in config_target.Datastore:
            if d.Datastore.Accessible and d.Datastore.Name == ds_name:
                ds = d.Datastore.Datastore
                ds_name = d.Datastore.Name
                break
        if not ds:
            raise CreatorException("Datastore is not available")
        volume_name = "[%s]" % ds_name

        # add parameters to the create vm task
        create_vm_request = VI.CreateVM_TaskRequestMsg()
        config = create_vm_request.new_config()
        vmfiles = config.new_files()
        vmfiles.set_element_vmPathName(volume_name)
        config.set_element_files(vmfiles)
        config.set_element_name(vm_name)
        config.set_element_annotation(description)
        config.set_element_memoryMB(memory_size)
        config.set_element_numCPUs(cpu_count)
        config.set_element_guestId(guestosid)
        devices = []

        # add a scsi controller
        disk_ctrl_key = 1
        scsi_ctrl_spec = config.new_deviceChange()
        scsi_ctrl_spec.set_element_operation("add")
        scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
        scsi_ctrl.set_element_busNumber(0)
        scsi_ctrl.set_element_key(disk_ctrl_key)
        scsi_ctrl.set_element_sharedBus("noSharing")
        scsi_ctrl_spec.set_element_device(scsi_ctrl)
        devices.append(scsi_ctrl_spec)

        # find ide controller
        if iso:
            ide_ctlr = None
            for dev in default_devs:
                if dev.typecode.type[1] == "VirtualIDEController":
                    ide_ctlr = dev
                    # add a cdrom based on a physical device
            if ide_ctlr:
                cd_spec = config.new_deviceChange()
                cd_spec.set_element_operation("add")
                cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
                cd_device_backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
                ds_ref = cd_device_backing.new_datastore(ds)
                ds_ref.set_attribute_type(ds.get_attribute_type())
                cd_device_backing.set_element_datastore(ds_ref)
                cd_device_backing.set_element_fileName("%s %s" % (volume_name, iso))
                cd_ctrl.set_element_backing(cd_device_backing)
                cd_ctrl.set_element_key(20)
                cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
                cd_ctrl.set_element_unitNumber(0)
                cd_spec.set_element_device(cd_ctrl)
                devices.append(cd_spec)

        # create a new disk - file based - for the vm
        if disk_size != 0:
            disk_spec = config.new_deviceChange()
            disk_spec.set_element_fileOperation("create")
            disk_spec.set_element_operation("add")
            disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
            disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
            disk_backing.set_element_fileName(volume_name)
            disk_backing.set_element_diskMode("persistent")
            disk_ctlr.set_element_key(0)
            disk_ctlr.set_element_controllerKey(disk_ctrl_key)
            disk_ctlr.set_element_unitNumber(0)
            disk_ctlr.set_element_backing(disk_backing)
            disk_ctlr.set_element_capacityInKB(disk_size)
            disk_spec.set_element_device(disk_ctlr)
            devices.append(disk_spec)

        # add a NIC. the network Name must be set as the device name to create the NIC.
        for network_name in networks:
            nic_spec = config.new_deviceChange()
            nic_spec.set_element_operation("add")
            nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
            nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
            nic_backing.set_element_deviceName(network_name)
            nic_ctlr.set_element_addressType("generated")
            nic_ctlr.set_element_backing(nic_backing)
            nic_ctlr.set_element_key(4)
            nic_spec.set_element_device(nic_ctlr)
            devices.append(nic_spec)

        config.set_element_deviceChange(devices)
        create_vm_request.set_element_config(config)
        folder_mor = create_vm_request.new__this(vmfmor)
        folder_mor.set_attribute_type(vmfmor.get_attribute_type())
        create_vm_request.set_element__this(folder_mor)
        rp_mor = create_vm_request.new_pool(rpmor)
        rp_mor.set_attribute_type(rpmor.get_attribute_type())
        create_vm_request.set_element_pool(rp_mor)
        host_mor = create_vm_request.new_host(hostmor)
        host_mor.set_attribute_type(hostmor.get_attribute_type())
        create_vm_request.set_element_host(host_mor)

        # CREATE THE VM - add option "wait"
        taskmor = self.esx_server._proxy.CreateVM_Task(create_vm_request)._returnval
        task = VITask(taskmor, self.esx_server)
        task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if task.get_state() == task.STATE_ERROR:
            self._disconnect_from_esx()
            raise CreatorException("Error creating vm: %s" % task.get_error_message())
Example #29
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False
    poweron = vm.is_powered_on()

    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    # Change Memory
    if 'memory_mb' in vm_hardware:

        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not memoryHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not memoryHotAddEnabled:
                        module.fail_json(
                            msg="memoryHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and memory shrink
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        module.fail_json(
                            msg="Cannot lower memory on a live VM. force is "
                            "required for shutdown")

            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']

    # ====( Config CPU )====#
    if 'num_cpus' in vm_hardware:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not cpuHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not cpuHotAddEnabled:
                        module.fail_json(
                            msg="cpuHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and cpu shrink without hot remove
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            module.fail_json(
                                msg="Cannot lower CPU on a live VM without "
                                "cpuHotRemove. force is required for shutdown")

            spec.set_element_numCPUs(int(vm_hardware['num_cpus']))

            changes['cpu'] = vm_hardware['num_cpus']

    if len(changes):

        if shutdown and vm.is_powered_on():
            try:
                vm.power_off(sync_run=True)
                vm.get_status()

            except Exception, e:
                module.fail_json(
                    msg='Failed to shutdown vm %s: %s' % (guest, e)
                )

        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            changed = True
        elif status == task.STATE_ERROR:
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())

        if vm.is_powered_off() and poweron:
            try:
                vm.power_on(sync_run=True)
            except Exception, e:
                module.fail_json(
                    msg='Failed to power on vm %s : %s' % (guest, e)
                )

        # NOTE: the block below was fused in from a separate folder-move
        # snippet; the code that defined folder_mor, vm_mors, found_vms and
        # folder_structure was elided in the source.

        try:
            req = VI.MoveIntoFolder_TaskRequestMsg()
            req.set_element__this(folder_mor)
            req.set_element_list(vm_mors)
            task = VITask(viserver._proxy.MoveIntoFolder_Task(req).Returnval, viserver)
            task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

            if task.get_state() == task.STATE_ERROR:
                viserver.disconnect()
                module.fail_json(msg="Error moving vm: %s to folder %s. Error: %s" %
                                 (found_vms, json.dumps(folder_structure), task.get_error_message()))
            else:
                changed = True
        except Exception as e:
            viserver.disconnect()
            module.fail_json(msg="Error Requesting VM Move: %s for VM: %s" % (found_vms, json.dumps(folder_structure), str(e)))

    viserver.disconnect()
    module.exit_json(
        changed=changed,
        changes=found_vms)


# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
if __name__ == '__main__':
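
reconfigure_vm leans on spec_singleton(), which the snippet never defines. Judging purely from the call sites, it lazily builds the ConfigSpec and binds the request to the VM on first use; the following is a plausible minimal reconstruction, not the module's verbatim code.

def spec_singleton(spec, request, vm):
    # Build the spec once per request; later calls return the existing one.
    if not spec:
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        spec = request.new_spec()
    return spec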
Example #31
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False

    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    # Change Memory
    if vm_hardware['memory_mb']:

        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not memoryHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not memoryHotAddEnabled:
                        module.fail_json(
                            msg="memoryHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and memory shrink
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        module.fail_json(
                            msg="Cannot lower memory on a live VM. force is "
                            "required for shutdown")

            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']

    # ====( Config CPU )====#
    if vm_hardware['num_cpus']:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not cpuHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not cpuHotAddEnabled:
                        module.fail_json(
                            msg="cpuHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and cpu shrink without hot remove
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            module.fail_json(
                                msg="Cannot lower CPU on a live VM without "
                                "cpuHotRemove. force is required for shutdown")

            spec.set_element_numCPUs(int(vm_hardware['num_cpus']))

            changes['cpu'] = vm_hardware['num_cpus']

    if len(changes):

        if shutdown and vm.is_powered_on():
            try:
                vm.power_off(sync_run=True)
                vm.get_status()

            except Exception, e:
                module.fail_json(
                    msg='Failed to shutdown vm %s: %s' % (guest, e)
                )

        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            changed = True
        elif status == task.STATE_ERROR:
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())

        if vm.is_powered_off():
            try:
                vm.power_on(sync_run=True)
            except Exception, e:
                module.fail_json(
                    msg='Failed to power on vm %s : %s' % (guest, e)
                )
Example #32
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, vsphere_client)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if task.get_state() == task.STATE_ERROR:
        vsphere_client.disconnect()
        module.fail_json(msg="Error creating vm: %s" %
                         task.get_error_message())
    else:
        # We always need to get the vm because we are going to gather facts
        vm = vsphere_client.get_vm_by_name(guest)

        # VM was created. If any extra config options were specified, set
        # them here, disconnect from vcenter, then exit.
        if vm_extra_config:
            vm.set_extra_config(vm_extra_config)

        # Power on the VM if it was requested
        power_state(vm, state, True)

        vsphere_client.disconnect()
        module.exit_json(
            ansible_facts=gather_facts(vm),
Example #33
    raise Exception("The vm seems to lack a Virtual Nic")

#Set Nic macAddress to Manual and set address
net_device.set_element_addressType("Manual")
net_device.set_element_macAddress(new_mac)

#Invoke ReconfigVM_Task
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
dev_change = spec.new_deviceChange()
dev_change.set_element_device(net_device)
dev_change.set_element_operation("edit")
spec.set_element_deviceChange([dev_change])
request.set_element_spec(spec)
ret = s._proxy.ReconfigVM_Task(request)._returnval

#Wait for the task to finish
task = VITask(ret, s)

status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
    print "VM successfully reconfigured"
elif status == task.STATE_ERROR:
    print "Error reconfiguring vm:", task.get_error_message()

#Disconnect from the server
s.disconnect()
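
Every example above repeats the same submit-then-wait dance around VITask. A small helper capturing that pattern (a sketch; the top-level pysphere imports are assumed and may differ between versions):

from pysphere import VITask, VIException, FaultTypes  # assumed top-level exports

def run_task(task_mor, server, sync=True):
    """Wrap a task MOR; when sync, block until it finishes and raise on error."""
    vi_task = VITask(task_mor, server)
    if not sync:
        return vi_task
    status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    if status == vi_task.STATE_ERROR:
        raise VIException(vi_task.get_error_message(), FaultTypes.TASK_ERROR)
    return vi_task

# e.g.: run_task(s._proxy.ReconfigVM_Task(request)._returnval, s)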