Example #1
    def add_existence_vmdk(self, vm_name, path):
        """
        Attach an existing hard drive (.vmdk) to the virtual machine
        :param vm_name: virtual machine name
        :param path: datastore path to the existing .vmdk file
        :raise: ExistenceException, CreatorException
        """
        self._connect_to_esx()
        try:
            vm = self.esx_server.get_vm_by_name(vm_name)
        except Exception:
            raise ExistenceException("Couldn't find the virtual machine %s" % vm_name)

        unit_number = -1
        for disk in vm._disks:
            unit_number = max(unit_number, disk["device"]["unitNumber"])
        unit_number += 1

        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)

        spec = request.new_spec()

        dc = spec.new_deviceChange()
        dc.Operation = "add"

        hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
        hd.Key = -100
        hd.UnitNumber = unit_number
        hd.CapacityInKB = 0
        hd.ControllerKey = 1000

        backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass()
        backing.FileName = path
        backing.DiskMode = "persistent"
        backing.ThinProvisioned = False
        hd.Backing = backing

        connectable = hd.new_connectable()
        connectable.StartConnected = True
        connectable.AllowGuestControl = False
        connectable.Connected = True
        hd.Connectable = connectable

        dc.Device = hd

        spec.DeviceChange = [dc]
        request.Spec = spec

        task = self.esx_server._proxy.ReconfigVM_Task(request)._returnval
        vi_task = VITask(task, self.esx_server)

        # Wait for the task to finish
        status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            self._disconnect_from_esx()
            raise CreatorException("ERROR CONFIGURING VM:%s" % vi_task.get_error_message())
Example #2
    def destroy_vm(self, vmname):
        """
        Destroys virtual machine by name
        :param vmname: virtual machine name
        :raise: ExistenceException, CreatorException
        """

        self._connect_to_esx()

        try:
            vm = self.esx_server.get_vm_by_name(vmname)
        except Exception as error:
            self._disconnect_from_esx()
            raise ExistenceException("Couldn't find VM '%s' - %s" % (vmname, error))

        try:
            if vm.is_powered_on() or vm.is_powering_off() or vm.is_reverting():
                vm.power_off()
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = self.esx_server._proxy.Destroy_Task(request)._returnval

            # Wait for the task to finish
            task = VITask(ret, self.esx_server)

            status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
            if status != task.STATE_SUCCESS:
                raise CreatorException("Couldn't destroy vm - " + task.get_error_message())
        except Exception:
            self._disconnect_from_esx()
            raise CreatorException("Couldn't destroy the virtual machine %s" % vmname)
    def rename(self, new_name, sync_run=True):
        """
        Renames this managed entity.
          * new_name: Any / (slash) or \ (backslash) character used in this name
            element will be escaped. Similarly, any % (percent) character used
            in this name element will be escaped, unless it is used to start an
            escape sequence. A slash is escaped as %2F or %2f. A backslash is
            escaped as %5C or %5c, and a percent is escaped as %25.
          * sync_run: (default True), If False does not wait for the task to
            finish and returns an instance of a VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Rename_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_newName(new_name)

            task = self._server._proxy.Rename_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VITaskException(vi_task.info.error)
                return

            return vi_task

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
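With `sync_run=False` the caller receives the `VITask` and must poll it. A sketch of that pattern using pysphere's task-state constants (`entity` is assumed to be any managed entity exposing this `rename`):

import time

task = entity.rename("renamed-vm", sync_run=False)  # returns a VITask immediately
while task.get_state() not in (task.STATE_SUCCESS, task.STATE_ERROR):
    time.sleep(1)  # free to do other work while vCenter runs the task
if task.get_state() == task.STATE_ERROR:
    print("Rename failed: %s" % task.get_error_message())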
Example #4
    def delete_vm(self, vm_name):
        """ VMWareSystem implementation of delete_vm. """
        if vm_name is None:
            raise Exception('Could not delete VM: no VM name given.')
        else:
            try:
                vm = self.api.get_vm_by_name(vm_name)
            except VIException as ex:
                raise Exception(ex)

            if vm.is_powered_on():
                raise Exception('Could not delete %s because it\'s still running.' % vm_name)
            else:
                # When pysphere moves up to 0.1.8, we can just do:
                # vm.destroy()
                request = VI.Destroy_TaskRequestMsg()
                _this = request.new__this(vm._mor)
                _this.set_attribute_type(vm._mor.get_attribute_type())
                request.set_element__this(_this)
                rtn = self.api._proxy.Destroy_Task(request)._returnval

                task = VITask(rtn, self.api)
                status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
                if status == task.STATE_SUCCESS:
                    return True
        return False
def delete_vm(vsphere_client, module, guest, vm, force):
    try:

        if vm.is_powered_on():
            if force:
                try:
                    vm.power_off(sync_run=True)
                    vm.get_status()

                except Exception as e:
                    module.fail_json(
                        msg='Failed to shutdown vm %s: %s' % (guest, e))
            else:
                module.fail_json(
                    msg='You must either shut the VM down first or use force.')

        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vsphere_client._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, vsphere_client)

        # Wait for the task to finish
        status = task.wait_for_state(
            [task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_ERROR:
            vsphere_client.disconnect()
            module.fail_json(msg="Error removing vm: %s" %
                             task.get_error_message())
        module.exit_json(changed=True, changes="VM %s deleted" % guest)
    def destroy(self, sync_run=True):
        """
        Destroys this object, deleting its contents and removing it from its
        parent folder (if any)
        * sync_run: (default True), If False does not wait for the task to
            finish and returns an instance of a VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)

            task = self._server._proxy.Destroy_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VITaskException(vi_task.info.error)
                return

            return vi_task
        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
Example #7
    def rename(self, new_name, sync_run=True):
        """
        Renames this managed entity.
          * new_name: Any / (slash) or \ (backslash) character used in this name
            element will be escaped. Similarly, any % (percent) character used
            in this name element will be escaped, unless it is used to start an
            escape sequence. A slash is escaped as %2F or %2f. A backslash is
            escaped as %5C or %5c, and a percent is escaped as %25.
          * sync_run: (default True), If False does not wait for the task to
            finish and returns an instance of a VITask for the user to monitor
            its progress 
        """
        try:
            request = VI.Rename_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_newName(new_name)

            task = self._server._proxy.Rename_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state(
                    [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
    def delete_file(self, path, datacenter=None, sync_run=True):
        """Deletes the specified file or folder from the datastore.
        If a file of a virtual machine is deleted, it may corrupt
        that virtual machine. Folder deletes are always recursive.
        """
        try:
            request = VI.DeleteDatastoreFile_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_name(path)
            if datacenter:
                request.set_element_datacenter(datacenter)

            task = self._server._proxy.DeleteDatastoreFile_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VITaskException(vi_task.info.error)
                return

            return vi_task

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
Example #9
def delete_vm(vm):
    vm_name = vm.get_property('name', from_cache=False)
    log("Preparing to delete VM %s..." % vm_name)
    vm_status = vm.get_status()
    log("VM status: %s" % vm_status)
    if vm_status == "POWERED OFF" or vm_status == "POWERING OFF":
        log("VM power state: %s" % vm_status)
    else:
        log("Powering off VM %s..." % vm_name)
        vm.power_off()

    log("Deleting VM %s..." % vm_name)
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    ret = server._proxy.Destroy_Task(request)._returnval

    # Wait for the delete task to finish
    task = VITask(ret, server)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        log("VM successfully deleted from disk.")
    elif status == task.STATE_ERROR:
        error_msg = "Error while deleting VM: " + task.get_error_message()
        raise Exception(error_msg)
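The function above relies on module-level `server` and `log` helpers. A sketch of the surrounding wiring with plain pysphere (hostname and credentials are placeholders):

from pysphere import VIServer, VITask
from pysphere.resources import VimService_services as VI

def log(msg):
    print(msg)

server = VIServer()
server.connect("vcenter.example.com", "administrator", "secret")
try:
    delete_vm(server.get_vm_by_name("obsolete-vm"))
finally:
    server.disconnect()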
Example #10
    def destroy(self, sync_run=True):
        """
        Destroys this object, deleting its contents and removing it from its 
        parent folder (if any)
        * sync_run: (default True), If False does not wait for the task to
            finish and returns an instance of a VITask for the user to monitor
            its progress
        """
        try:
            request = VI.Destroy_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)

            task = self._server._proxy.Destroy_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state(
                    [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                return

            return vi_task
        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
Example #11
    def delete_instance(self, name=None):
        # Check if name is passed, if not extract from vm_json
        if not name:
            name = self.vm_json.get('name')

        # Initialise vcenter handle
        vcenter_handle = self.server
        try:
            vm = vcenter_handle.get_vm_by_name(name)
        except Exception:
            logger.info('VM %s not present in vCenter. This is OK' % name)
            return
        # Power off if not already
        if not vm.is_powered_off():
            vm.power_off()
        # Invoke Destroy_Task
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = vcenter_handle._proxy.Destroy_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vcenter_handle)

        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            logger.info('VM %s successfully deleted from disk' % name)
        elif status == task.STATE_ERROR:
            logger.info('Error removing vm: %s' % task.get_error_message())
Example #13
    def register_vm(self, path, name=None, sync_run=True, folder=None,
                    template=False, resourcepool=None, host=None):
        """Adds an existing virtual machine to the folder.
        @path: a datastore path to the virtual machine.
            Example "[datastore] path/to/machine.vmx".
        @name: the name to be assigned to the virtual machine.
            If this parameter is not set, the displayName configuration
            parameter of the virtual machine is used.
        @sync_run: if True (default) waits for the task to finish, and returns
            a VIVirtualMachine instance with the new VM (raises an exception if
            the task didn't succeed). If @sync_run is set to False the task is
            started and a VITask instance is returned
        @folder_name: folder in which to register the virtual machine.
        @template: Flag to specify whether or not the virtual machine
            should be marked as a template.
        @resourcepool: MOR of the resource pool to which the virtual machine should
            be attached. If imported as a template, this parameter is not set.
        @host: The target host on which the virtual machine will run. This
            parameter must specify a host that is a member of the ComputeResource
            indirectly specified by the pool. For a stand-alone host or a cluster
            with DRS, the parameter can be omitted, and the system selects a default.
        """
        if not folder:
            folders = self._get_managed_objects(MORTypes.Folder)
            folder = [_mor for _mor, _name in folders.iteritems()
                          if _name == 'vm'][0]
        try:
            request = VI.RegisterVM_TaskRequestMsg()
            _this = request.new__this(folder)
            _this.set_attribute_type(folder.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_path(path)
            if name:
                request.set_element_name(name)
            request.set_element_asTemplate(template)
            if resourcepool:
                pool = request.new_pool(resourcepool)
                pool.set_attribute_type(resourcepool.get_attribute_type())
                request.set_element_pool(pool)
            if host:
                if not VIMor.is_mor(host):
                    host = VIMor(host, MORTypes.HostSystem)
                hs = request.new_host(host)
                hs.set_attribute_type(host.get_attribute_type())
                request.set_element_host(hs)

            task = self._proxy.RegisterVM_Task(request)._returnval
            vi_task = VITask(task, self)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VITaskException(vi_task.info.error)
                return

            return vi_task

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
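A usage sketch for `register_vm`, assuming the method is available on a connected server object as above (the datastore path and VM name are placeholders). With `sync_run=True` it returns `None` on success and raises on failure:

server.register_vm("[datastore1] imported-vm/imported-vm.vmx",
                   name="imported-vm")

# Or start it asynchronously and keep the VITask to poll:
vi_task = server.register_vm("[datastore1] imported-vm/imported-vm.vmx",
                             name="imported-vm", sync_run=False)
print(vi_task.get_state())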
Example #15
def main():
    opts = options()

    # CONNECTION PARAMETERS
    server = opts.esx_host
    user = opts.user
    password = opts.passwd

    # REQUIRED PARAMETERS
    vmname = opts.name

    # CONNECT TO THE SERVER
    s = VIServer()
    s.connect(server, user, password)

    try:
        vm = s.get_vm_by_name(opts.name)
        vm.shutdown_guest()
        
        count = 1
        wait_for = 60
        try:
            while count < wait_for and not vm.is_powered_off():
                count += 1
                time.sleep(1)
                print("Elapsed %s seconds ..." % count)

        except Exception as e:
            if count >= wait_for:
                print("Failed to shut down the VM (%s) even after %s seconds." % (vmname, wait_for))
                print("Please log in to the ESXi server and fix the issue. Exception: %s" % e)
                sys.exit(1)

        check_count(count, wait_for)
    except Exception as e:
        print "Failed to locate and shutdown the new VM using:", opts.name
        print "VM could not be deleted."
        print "Exception:", str(e)

    # Invoke Destroy_Task
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    ret = s._proxy.Destroy_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, s)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully deleted from disk"
    elif status == task.STATE_ERROR:
        print "Error removing vm:", task.get_error_message()

    # disconnect from the server
    s.disconnect()
    def list_files(self, path, case_insensitive=True,
                   folders_first=True, match_patterns=[]):
        """Return a list of files in folder @path
        """

        ds_name, file_name = re.match(self._re_path, path).groups()
        ds = [k for k,v in self._server.get_datastores().items() if v == ds_name][0]
        browser_mor = VIProperty(self._server, ds).browser._obj

        request = VI.SearchDatastore_TaskRequestMsg()
        _this = request.new__this(browser_mor)
        _this.set_attribute_type(browser_mor.get_attribute_type())
        request.set_element__this(_this)
        request.set_element_datastorePath(path)

        search_spec = request.new_searchSpec()

        query = [VI.ns0.FloppyImageFileQuery_Def('floppy').pyclass(),
                 VI.ns0.FileQuery_Def('file').pyclass(),
                 VI.ns0.FolderFileQuery_Def('folder').pyclass(),
                 VI.ns0.IsoImageFileQuery_Def('iso').pyclass(),
                 VI.ns0.VmConfigFileQuery_Def('vm').pyclass(),
                 VI.ns0.TemplateConfigFileQuery_Def('template').pyclass(),
                 VI.ns0.VmDiskFileQuery_Def('vm_disk').pyclass(),
                 VI.ns0.VmLogFileQuery_Def('vm_log').pyclass(),
                 VI.ns0.VmNvramFileQuery_Def('vm_ram').pyclass(),
                 VI.ns0.VmSnapshotFileQuery_Def('vm_snapshot').pyclass()]
        search_spec.set_element_query(query)
        details = search_spec.new_details()
        details.set_element_fileOwner(True)
        details.set_element_fileSize(True)
        details.set_element_fileType(True)
        details.set_element_modification(True)
        search_spec.set_element_details(details)
        search_spec.set_element_searchCaseInsensitive(case_insensitive)
        search_spec.set_element_sortFoldersFirst(folders_first)
        search_spec.set_element_matchPattern(match_patterns)
        request.set_element_searchSpec(search_spec)
        response = self._server._proxy.SearchDatastore_Task(request)._returnval
        vi_task = VITask(response, self._server)
        if vi_task.wait_for_state([vi_task.STATE_ERROR, vi_task.STATE_SUCCESS]) == vi_task.STATE_ERROR:
            raise VITaskException(vi_task.info.error)
        info = vi_task.get_result()
        # return info

        if not hasattr(info, "file"):
            return []
        # for fi in info.file:
        #     fi._get_all()
        return [{'type':fi._type,
                 'path':fi.path,
                 'size':fi.fileSize,
                 'modified':fi.modification,
                 'owner':fi.owner
                } for fi in info.file]
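A usage sketch for `list_files`, assuming it is bound to a datastore helper built around a connected `VIServer` (the `DatastoreBrowser` name is hypothetical; credentials and paths are placeholders):

from pysphere import VIServer

server = VIServer()
server.connect("vcenter.example.com", "administrator", "secret")
browser = DatastoreBrowser(server)  # hypothetical class hosting list_files above
for entry in browser.list_files("[datastore1] vms/", match_patterns=["*.vmdk"]):
    # each entry is a dict with 'type', 'path', 'size', 'modified', 'owner'
    print("%s  %s  (%s bytes)" % (entry['type'], entry['path'], entry['size']))
server.disconnect()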
Example #17
 def remove_vnic(self, label):
     '''Remove a vnic from a VM in vcenter.
     Arguments:
     vm: vm from which the vnic should be removed,
     label: name of the vnic
     **Note**
     Vnics must be removed within the same instantiation of this class.
     VNICs are stored in a temporary dictionary, so if VNICs are not
     removed at the end of this class instance, then they will have
     to be manually removed in the vcenter
     '''
     vm = self.vm_json.get('name')
     vm_obj = self.server.get_vm_by_name(vm, self.datacenter)
     if not vm_obj:
         raise Exception("VM %s not found" % vm)
     net_device = None
     if (vm, label) not in self.vm_vnics:
         raise Exception("vcenter removeVNIC error: vm_vnics not found: " +
                         str((vm, label)))
     # Find Virtual Nic device
     for dev in vm_obj.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ] and dev.deviceInfo.label == self.vm_vnics[(vm, label)][0]):
             net_device = dev._obj
             break
     if not net_device:
         raise Exception("NIC not found")
     # Reconfigure
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("remove")
     dev_change.set_element_device(net_device)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         logger.info("   removing vnic in %s on vm %s successful " %
                     (label, vm))
     elif status == task.STATE_ERROR:
         raise Exception("Error removing vnic in %s on vm %s msg %s" %
                         (label, vm, task.get_error_message()))
def disconnect_vm_cdroms(vm, server):
    connected_cdroms = []

    for dev in vm.properties.config.hardware.device:
        if dev._type == "VirtualCdrom":
        # and dev.connectable.connected:
        # if dev._type == "VirtualCdrom" and dev.connectable.startConnected:
            d = dev._obj
            try:
                d.Connectable.set_element_connected(False)
                d.Connectable.set_element_startConnected(False)
                connected_cdroms.append(d)
            except:
                log(level="warning", msg="%s: no virtual cd roms found." %
                    vm.properties.name)

    if not connected_cdroms:
        log(level="info", msg="%s: has no connected cd roms" %
            vm.properties.name)
        return

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_changes = []

    for dev in connected_cdroms:
        change_cdrom_type(dev, "CLIENT DEVICE")
        dev_change = spec.new_deviceChange()
        dev_change.set_element_device(dev)
        dev_change.set_element_operation("edit")
        dev_changes.append(dev_change)

    spec.set_element_deviceChange(dev_changes)
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    # Wait for the task to finish
    # Remove all this section if you don't wish to wait for the task to finish
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        log(level="info", msg="%s: successfully reconfigured" %
            vm.properties.name)
    elif status == task.STATE_ERROR:
        log(level="error", msg="%s: Error reconfiguring vm" %
            vm.properties.name)
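`change_cdrom_type` is not shown in this snippet. A plausible sketch based on the vSphere CD-ROM backing classes; treat it as an assumption about the missing helper, not the original code. For "CLIENT DEVICE" the drive is rebacked as a remote passthrough device:

# assumes: from pysphere.resources import VimService_services as VI
def change_cdrom_type(cdrom_device, cdrom_type, value=""):
    # Replace the CD-ROM backing; only the branches this script needs are sketched.
    if cdrom_type == "ISO":
        backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("backing").pyclass()
        backing.set_element_fileName(value)  # e.g. "[datastore1] isos/boot.iso"
    elif cdrom_type == "CLIENT DEVICE":
        backing = VI.ns0.VirtualCdromRemotePassthroughBackingInfo_Def(
            "backing").pyclass()
        backing.set_element_exclusive(False)
        backing.set_element_deviceName("")
    cdrom_device.set_element_backing(backing)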
Example #19
 def change_dvs_net(self, vm, pg_map, dc_name):
     """Takes a VIServer and VIVirtualMachine object and reconfigures
     dVS portgroups according to the mappings in the pg_map dict. The
     pg_map dict must contain the source portgroup as key and the
     destination portgroup as value"""
     # Find virtual NIC devices
     vm_obj = self.__s.get_vm_by_name(vm)
     uuid = self.GetDvsUuid(dc_name, pg_map)
     if not vm_obj:
         raise Exception("VM %s not found" % vm)
     net_device = []
     for dev in vm_obj.properties.config.hardware.device:
         if dev._type in ["VirtualE1000", "VirtualE1000e",
                          "VirtualPCNet32", "VirtualVmxnet",
                          "VirtualVmxnet2", "VirtualVmxnet3"]:
             net_device.append(dev)
 
     # Throw an exception if there is no NIC found
     if len(net_device) == 0:
         raise Exception("The vm seems to lack a Virtual Nic")
     try:
         # Use pg_map to set the new Portgroups
         for dev in net_device:
             #old_portgroup = dev.backing.port.portgroupKey
             #if pg_map.has_key(old_portgroup):
             dev.backing.port._obj.set_element_portgroupKey(pg_map)
             dev.backing.port._obj.set_element_portKey('')
             dev.backing.port._obj.set_element_switchUuid(uuid)
     
         # Invoke ReconfigVM_Task
         request = VI.ReconfigVM_TaskRequestMsg()
         _this = request.new__this(vm_obj._mor)
         _this.set_attribute_type(vm_obj._mor.get_attribute_type())
         request.set_element__this(_this)
     
         # Build a list of device change spec objects
         spec = request.new_spec()
         devs_changed = []
         for dev in net_device:
             dev_change = spec.new_deviceChange()
             dev_change.set_element_device(dev._obj)
             dev_change.set_element_operation("edit")
             devs_changed.append(dev_change)
     
         # Submit the device change list
         spec.set_element_deviceChange(devs_changed)
         request.set_element_spec(spec)
         ret = self.__s._proxy.ReconfigVM_Task(request)._returnval
     
         # Wait for the task to finish
         task = VITask(ret, self.__s)
     
         #status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     except Exception as e:
         item = 'Failed to change the virtual machine network'
         detail = str(e)
         key = 'change_network'
         self.ReportError(item, detail, key)
Example #20
def apply_changes(vm, server, cdrom):
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_device(cdrom)
    dev_change.set_element_operation("edit")
    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "%s: successfully reconfigured" % vm.properties.name
    elif status == task.STATE_ERROR:
        print "%s: Error reconfiguring vm" % vm.properties.name
def destroyGuest(host_con, guest_name):
    powerOffGuest(host_con, guest_name)
    try:
        vm = host_con.get_vm_by_name(guest_name)
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        ret = host_con._proxy.Destroy_Task(request)._returnval
        task = VITask(ret, host_con)
        print('Waiting for the VM to be deleted')
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            result = 'Successfully removed guest: %s' % guest_name
        elif status == task.STATE_ERROR:
            result = 'Failed to remove VM: %s\n%s' % (guest_name, task.get_error_message())
    except Exception as e:
        result = 'Failed to remove VM: %s\n%s' % (guest_name, str(e))
    return result
Example #22
    def delete_vm(self, vm_name):
        vm = self._get_vm(vm_name)

        if vm.is_powered_on():
            self.stop_vm(vm_name)

        # When pysphere moves up to 0.1.8, we can just do:
        # vm.destroy()
        request = VI.Destroy_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)
        rtn = self.api._proxy.Destroy_Task(request)._returnval

        task = VITask(rtn, self.api)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            return True
        else:
            return False
Example #25
    def handler_revert_to_snapshot(self, task_id, parameters):
        vm_id = parameters['vm_id']
        snapshot_id = parameters['snapshot_id']

        vm_mor = VIMor(vm_id, MORTypes.VirtualMachine)
        snapshot_mor = VIMor(snapshot_id, MORTypes.VirtualMachineSnapshot)

        vm_properties_future = self.application.executor.submit(self.server._get_object_properties, vm_mor, ['name', 'snapshot'])

        request = VI.RevertToSnapshot_TaskRequestMsg()

        mor_snap = request.new__this(snapshot_mor)
        mor_snap.set_attribute_type(snapshot_mor.get_attribute_type())
        request.set_element__this(mor_snap)

        vm_name = None
        snapshot_name = None
        vm_properties = yield vm_properties_future
        for prop in vm_properties.PropSet:
            if prop.Name == 'name':
                vm_name = prop.Val
            elif prop.Name == 'snapshot':
                snapshot_dict = ActionHandler.build_snapshot_dict(prop.Val.RootSnapshotList)
                snapshot_name = snapshot_dict[snapshot_mor].Name

        TaskStatusHandler.update_task(task_id, 'Reverting {0} to {1}...'.format(vm_name, snapshot_name))

        vi_task = self.server._proxy.RevertToSnapshot_Task(request)._returnval

        vi_task = VITask(vi_task, self.server)
        status = yield self.application.executor.submit(
            vi_task.wait_for_state, [vi_task.STATE_SUCCESS,
                                     vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            raise VIException(vi_task.get_error_message(),
                              FaultTypes.TASK_ERROR)

        TaskStatusHandler.update_task(task_id, 'Successfully reverted {0} to {1}'.format(vm_name, snapshot_name))
        TaskStatusHandler.delete_task(task_id)

        self.send_vm_update(vm_id)
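For comparison, pysphere's `VIVirtualMachine` can perform the same revert without hand-building the SOAP request, at the cost of the executor/yield integration above. A minimal synchronous sketch with placeholder names and credentials:

from pysphere import VIServer

server = VIServer()
server.connect("vcenter.example.com", "administrator", "secret")
vm = server.get_vm_by_name("test-vm")
vm.revert_to_named_snapshot("before-upgrade")  # blocks until the revert completes
server.disconnect()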
Example #26
 def __execute_net_device_reconfig_task(self, vm_obj, net_device):
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("edit")
     dev_change.set_element_device(net_device)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         logger.info("Net device reconfig task on vm %s successful " %
                     (vm_obj.properties.name))
     elif status == task.STATE_ERROR:
         raise Exception("Error: Net device reconfig task on vm %s msg %s" %
                         (vm_obj.properties.name, task.get_error_message()))
Example #27
    def destroy_node(self, node, ex_remove_files=True):
        """
        :param ex_remove_files: Remove all the files from the datastore.
        :type ex_remove_files: ``bool``
        """
        ex_remove_files = False
        vm = self._get_vm_for_node(node=node)

        server = self.connection.client

        # Based on code from
        # https://pypi.python.org/pypi/pyxenter
        if ex_remove_files:
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.Destroy_Task(request)._returnval
            task = VITask(ret, server)

            # Wait for the task to finish
            status = task.wait_for_state([task.STATE_SUCCESS,
                                          task.STATE_ERROR])

            if status == task.STATE_ERROR:
                raise LibcloudError('Error destroying node: %s' %
                                    (task.get_error_message()))
        else:
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

        return True
Example #28
 def vm_delete(self, vminstance):
     try:
         for vms in vminstance:
             vm = vms["obj"]
             if vm.is_powered_on():
                 vm.power_off(sync_run=False)
             # Invoke Destroy_Task
             request = VI.Destroy_TaskRequestMsg()
             _this = request.new__this(vm._mor)
             _this.set_attribute_type(vm._mor.get_attribute_type())
             request.set_element__this(_this)
             ret = self.server._proxy.Destroy_Task(request)._returnval
             # Wait for the task to finish
             task = VITask(ret, self.server)
             status = task.wait_for_state(
                 [task.STATE_SUCCESS, task.STATE_ERROR])
             #if status == task.STATE_SUCCESS:
             #   return True
             #elif status == task.STATE_ERROR:
             #   print "Error removing vm:", task.get_error_message()
             #  return False
         return True
     except Exception:
         return False
    def copy_file(self, source_path, dest_path, source_datacenter=None,
                  dest_datacenter=None, force=False, sync_run=True):
        """Copies the source file or folder to the destination. If the destination
        file does not exist, it is created. If the destination file exists, the
        @force parameter determines whether to overwrite it with the source or not.
        Folders can be copied recursively. In this case, the destination, if it
        exists, must be a folder, else one will be created. Existing files on
        the destination that conflict with source files can be overwritten
        using the @force parameter.
        """
        try:
            request = VI.CopyDatastoreFile_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_sourceName(source_path)
            request.set_element_destinationName(dest_path)
            if source_datacenter:
                request.set_element_sourceDatacenter(source_datacenter)
            if dest_datacenter:
                request.set_element_destinationDatacenter(dest_datacenter)
            request.set_element_force(force)

            task = self._server._proxy.CopyDatastoreFile_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                                 vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VITaskException(vi_task.info.error)
                return

            return vi_task

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)
Example #30
    def delete_vm_by_name(self, name, remove_files=True):
        """
        Unregisters a VM and removes its files from the datastore, by name.
        @name is the VM name.
        @remove_files - if True (default) will delete VM files from datastore.
        """
        statusLine = ''
        success = False

        if not self.__logged:
            raise VIException("Must call 'connect' before invoking this method", FaultTypes.NOT_CONNECTED)
        try:
            # Get VM
            vm = self.get_vm_by_name(name)

            if remove_files:
                # Invoke Destroy_Task
                request = VI.Destroy_TaskRequestMsg()

                _this = request.new__this(vm._mor)
                _this.set_attribute_type(vm._mor.get_attribute_type())
                request.set_element__this(_this)
                ret = self._proxy.Destroy_Task(request)._returnval
                task = VITask(ret, self)
                
                # Wait for the task to finish
                status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

                if status == task.STATE_SUCCESS:
                    statusLine = "VM successfully unregistered and deleted from datastore"
                    success = True

                elif status == task.STATE_ERROR:
                    statusLine = "Error removing vm: {}".format(task.get_error_message())
                    success = False

            else:
                # Invoke UnregisterVMRequestMsg
                request = VI.UnregisterVMRequestMsg()

                _this = request.new__this(vm._mor)
                _this.set_attribute_type(vm._mor.get_attribute_type())
                request.set_element__this(_this)
                ret = self._proxy.UnregisterVM(request)
                task = VITask(ret, self)

                statusLine = "VM successfully unregistered (files still on datastore)"
                success = True

        except VI.ZSI.FaultException as e:
            raise VIApiException(e)

        return success, statusLine
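Because the method reports failure through its `(success, statusLine)` return value rather than by raising on task errors, callers must check the flag themselves. A usage sketch, assuming the method has been added to a connected `VIServer` instance:

ok, status = server.delete_vm_by_name("stale-vm", remove_files=True)
print(status)
if not ok:
    raise RuntimeError(status)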
Example #31
    def destroy_node(self, node, ex_remove_files=True):
        """
        :param ex_remove_files: Remove all the files from the datastore.
        :type ex_remove_files: ``bool``
        """
        ex_remove_files = False
        vm = self._get_vm_for_node(node=node)

        server = self.connection.client

        # Based on code from
        # https://pypi.python.org/pypi/pyxenter
        if ex_remove_files:
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)

            # pylint: disable=no-member
            ret = server._proxy.Destroy_Task(request)._returnval
            # pylint: enable=no-member
            task = VITask(ret, server)

            # Wait for the task to finish
            status = task.wait_for_state(
                [task.STATE_SUCCESS, task.STATE_ERROR])

            if status == task.STATE_ERROR:
                raise LibcloudError('Error destroying node: %s' %
                                    (task.get_error_message()))
        else:
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

        return True
Example #32
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    folder_mor = create_vm_request.new__this(vmfmor)
    folder_mor.set_attribute_type(vmfmor.get_attribute_type())
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, vsphere_client)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if task.get_state() == task.STATE_ERROR:
        vsphere_client.disconnect()
        module.fail_json(msg="Error creating vm: %s" %
                         task.get_error_message())
    else:
        # We always need to get the vm because we are going to gather facts
        vm = vsphere_client.get_vm_by_name(guest)

        # VM was created. If there is any extra config options specified, set
        # them here , disconnect from vcenter, then exit.
        if vm_extra_config:
            vm.set_extra_config(vm_extra_config)

        # Power on the VM if it was requested
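The fragment ends before that step; with pysphere the power-on and wrap-up might look like the sketch below (`state` is assumed to come from the Ansible module's parameters):

        # Sketch of the missing tail: power on if requested, then report back.
        if state == 'powered_on':
            vm.power_on()

        vsphere_client.disconnect()
        module.exit_json(changed=True)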
Example #33
            for vm in vm_mors:
                if vm not in folder_children:
                    temp_mors.append(vm)
            vm_mors = temp_mors
            if len(vm_mors) == 0:
                viserver.disconnect()
                module.exit_json(changed=False)
        except Exception as e:
            viserver.disconnect()
            module.fail_json(msg=str(e))

        try:
            req = VI.MoveIntoFolder_TaskRequestMsg()
            req.set_element__this(folder_mor)
            req.set_element_list(vm_mors)
            task = VITask(
                viserver._proxy.MoveIntoFolder_Task(req).Returnval, viserver)
            task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

            if task.get_state() == task.STATE_ERROR:
                viserver.disconnect()
                module.fail_json(
                    msg="Error moving vm: %s to folder %s. Error: %s" %
                    (found_vms, json.dumps(folder_structure),
                     task.get_error_message()))
            else:
                changed = True
        except Exception as e:
            viserver.disconnect()
            module.fail_json(msg="Error Requesting VM Move: %s for VM: %s" %
                             (found_vms, json.dumps(folder_structure), str(e)))
Example #34
    def create_vm(self, vm_options):
        """
        Creates a virtual machine on the ESXi server
        :param vm_options: dict containing parameters for the VM:
        'vm_name'
        'iso'
        'datacenter_name'
        'datastore_name'
        'resource_pool_name'
        'networks'
        'description'
        'esx_hostname'
        'hard_drive'
        'guestosid'
        'memory_size'
        'cpu_count'
        'disk_size'
        See create_vm_old for details
        :raise: CreatorException, ExistenceException
        """
        self._connect_to_esx()

        # VM NAME
        vm_name = None
        try:
            vm_name = str(vm_options["vm_name"])
            vm_temp = self.esx_server.get_vm_by_name(vm_name)
            if vm_temp:
                raise ExistenceException('VM "%s" already exists' % vm_name)
        except KeyError:
            raise CreatorException("Must specify VM name")
        except pysphere.VIException as inst:
            if "[Object Not Found]" in str(inst):
                pass

        # HOSTNAME
        hosts = self.esx_server.get_hosts()
        try:
            esx_hostname = vm_options["esx_hostname"]
            if not esx_hostname:
                raise KeyError
            elif not (esx_hostname in hosts.values()):
                raise CreatorException("Couldn't find host '%s'" % esx_hostname)
        except KeyError:
            if len(hosts.values()) > 1:
                raise CreatorException("More than 1 host - must specify ESX Hostname")
            elif not hosts.values():
                raise CreatorException("Couldn't find available host")
            esx_hostname = hosts.values()[0]
        # MOR and PROPERTIES
        hostmor = [k for k, v in hosts.items() if v == esx_hostname][0]
        hostprop = VIProperty(self.esx_server, hostmor)

        # DATACENTER - FIX EXCEPTION
        # todo: fix self.esx_server.get_datacenters().values()
        dcs = self.esx_server.get_datacenters()
        dc_values = dcs.values()
        try:
            dc_name = vm_options["datacenter_name"]
            if dc_name not in dc_values:
                raise CreatorException("Couldn't find datacenter '%s'" % dc_name)
        except KeyError:
            if len(dc_values) > 1:
                raise CreatorException("More than 1 datacenter - must specify ESX Hostname")
            elif not dc_values:
                raise CreatorException("Couldn't find available datacenter")
            dc_name = dc_values[0]
        # MOR and PROPERTIES
        dcmor = [k for k, v in dcs.items() if v == dc_name][0]
        dcprops = VIProperty(self.esx_server, dcmor)

        # DATASTORE
        dcs = hostprop.datastore
        try:
            ds_name = vm_options["datastore_name"]
            ds_list = []
            for ds in dcs:
                ds_list.append(ds.name)
            if ds_name not in ds_list:
                raise CreatorException("Couldn't find datastore or datastore is not available")
        except KeyError:
            if len(dcs) > 1:
                raise CreatorException("More than 1 datastore on ESX - must specify datastore name")
            elif not dcs:
                raise CreatorException("Couldn't find available datastore")
            ds_name = dcs[0].name

        # RESOURCE POOL
        resource_pool_name = ""
        try:
            resource_pool_name = vm_options["resource_pool_name"]
            if resource_pool_name == "/":
                pass
            elif resource_pool_name[0] != "/":
                resource_pool_name = "/{0}".format(resource_pool_name)
        except KeyError:
            resource_pool_name = "/"
        finally:
            rpmor = self._fetch_resource_pool(resource_pool_name, esx_hostname)
            if not rpmor:
                raise CreatorException("Couldn't find resource pool '%s'" % resource_pool_name)

        # NETWORKS
        try:
            networks = list(vm_options["networks"])
        except Exception:
            networks = []

        try:
            iso = vm_options["iso"]
            if not iso:
                iso = None
            else:
                # todo: hide magic
                iso = iso[iso.find(ds_name) + len(ds_name) + 1 :]
        except KeyError:
            iso = None

        # Description
        try:
            description = vm_options["description"]
        except KeyError:
            description = "Description for VM %s" % vm_name

        try:
            guestosid = vm_options["guestosid"]
        except KeyError:
            guestosid = "otherGuest"

        try:
            memory_size = int(vm_options["memory_size"])
            if memory_size <= 0:
                raise CreatorException("Disk size must be greater than 0")
        except Exception:
            memory_size = DEFAULT_MEMORY_SIZE  # MB

        try:
            cpu_count = int(vm_options["cpu_count"])
        except Exception:
            cpu_count = DEFAULT_CPU_COUNT

        try:
            disk_size = int(vm_options["disk_size"])
            if disk_size < 0:
                raise CreatorException("Disk size must be greater than 0")
        except Exception:
            disk_size = DEFAULT_DISK_SIZE  # KB

        crprops = self._fetch_computer_resource(dcprops, hostmor)
        vmfmor = dcprops.vmFolder._obj

        # CREATE VM CONFIGURATION
        # get config target
        request = VI.QueryConfigTargetRequestMsg()
        _this = request.new__this(crprops.environmentBrowser._obj)
        _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
        request.set_element__this(_this)
        h = request.new_host(hostmor)
        h.set_attribute_type(hostmor.get_attribute_type())
        request.set_element_host(h)
        config_target = self.esx_server._proxy.QueryConfigTarget(request)._returnval

        # get default devices
        request = VI.QueryConfigOptionRequestMsg()
        _this = request.new__this(crprops.environmentBrowser._obj)
        _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
        request.set_element__this(_this)
        h = request.new_host(hostmor)
        h.set_attribute_type(hostmor.get_attribute_type())
        request.set_element_host(h)
        config_option = self.esx_server._proxy.QueryConfigOption(request)._returnval
        default_devs = config_option.DefaultDevice

        # get network name
        available_networks = 0
        for net in networks:
            for n in config_target.Network:
                if n.Network.Accessible and n.Network.Name == net:
                    available_networks += 1
                    break
        if len(networks) != available_networks:
            msg = ""
            for n in config_target.Network:
                msg = "%s %s" % (msg, str(n.Network.Name))
            raise CreatorException("Couldn't find all networks; found:%s" % msg)

        # get datastore
        ds = None
        for d in config_target.Datastore:
            if d.Datastore.Accessible and d.Datastore.Name == ds_name:
                ds = d.Datastore.Datastore
                ds_name = d.Datastore.Name
                break
        if not ds:
            raise CreatorException("Datastore is not available")
        volume_name = "[%s]" % ds_name

        # add parameters to the create vm task
        create_vm_request = VI.CreateVM_TaskRequestMsg()
        config = create_vm_request.new_config()
        vmfiles = config.new_files()
        vmfiles.set_element_vmPathName(volume_name)
        config.set_element_files(vmfiles)
        config.set_element_name(vm_name)
        config.set_element_annotation(description)
        config.set_element_memoryMB(memory_size)
        config.set_element_numCPUs(cpu_count)
        config.set_element_guestId(guestosid)
        devices = []

        # add a scsi controller
        disk_ctrl_key = 1
        scsi_ctrl_spec = config.new_deviceChange()
        scsi_ctrl_spec.set_element_operation("add")
        scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
        scsi_ctrl.set_element_busNumber(0)
        scsi_ctrl.set_element_key(disk_ctrl_key)
        scsi_ctrl.set_element_sharedBus("noSharing")
        scsi_ctrl_spec.set_element_device(scsi_ctrl)
        devices.append(scsi_ctrl_spec)

        # find ide controller
        if iso:
            ide_ctlr = None
            for dev in default_devs:
                if dev.typecode.type[1] == "VirtualIDEController":
                    ide_ctlr = dev
            # add a cdrom based on a physical device
            if ide_ctlr:
                cd_spec = config.new_deviceChange()
                cd_spec.set_element_operation("add")
                cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
                cd_device_backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
                ds_ref = cd_device_backing.new_datastore(ds)
                ds_ref.set_attribute_type(ds.get_attribute_type())
                cd_device_backing.set_element_datastore(ds_ref)
                cd_device_backing.set_element_fileName("%s %s" % (volume_name, iso))
                cd_ctrl.set_element_backing(cd_device_backing)
                cd_ctrl.set_element_key(20)
                cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
                cd_ctrl.set_element_unitNumber(0)
                cd_spec.set_element_device(cd_ctrl)
                devices.append(cd_spec)

        # create a new disk - file based - for the vm
        if disk_size != 0:
            disk_spec = config.new_deviceChange()
            disk_spec.set_element_fileOperation("create")
            disk_spec.set_element_operation("add")
            disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
            disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
            disk_backing.set_element_fileName(volume_name)
            disk_backing.set_element_diskMode("persistent")
            disk_ctlr.set_element_key(0)
            disk_ctlr.set_element_controllerKey(disk_ctrl_key)
            disk_ctlr.set_element_unitNumber(0)
            disk_ctlr.set_element_backing(disk_backing)
            disk_ctlr.set_element_capacityInKB(disk_size)
            disk_spec.set_element_device(disk_ctlr)
            devices.append(disk_spec)

        # add a NIC. the network Name must be set as the device name to create the NIC.
        for network_name in networks:
            nic_spec = config.new_deviceChange()
            nic_spec.set_element_operation("add")
            nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
            nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
            nic_backing.set_element_deviceName(network_name)
            nic_ctlr.set_element_addressType("generated")
            nic_ctlr.set_element_backing(nic_backing)
            nic_ctlr.set_element_key(4)
            nic_spec.set_element_device(nic_ctlr)
            devices.append(nic_spec)

        config.set_element_deviceChange(devices)
        create_vm_request.set_element_config(config)
        folder_mor = create_vm_request.new__this(vmfmor)
        folder_mor.set_attribute_type(vmfmor.get_attribute_type())
        create_vm_request.set_element__this(folder_mor)
        rp_mor = create_vm_request.new_pool(rpmor)
        rp_mor.set_attribute_type(rpmor.get_attribute_type())
        create_vm_request.set_element_pool(rp_mor)
        host_mor = create_vm_request.new_host(hostmor)
        host_mor.set_attribute_type(hostmor.get_attribute_type())
        create_vm_request.set_element_host(host_mor)

        # CREATE THE VM - add option "wait"
        taskmor = self.esx_server._proxy.CreateVM_Task(create_vm_request)._returnval
        task = VITask(taskmor, self.esx_server)
        task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if task.get_state() == task.STATE_ERROR:
            self._disconnect_from_esx()
            raise CreatorException("Error creating vm: %s" % task.get_error_message())
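The method above is driven entirely by the vm_options dict. A hedged usage sketch follows; the creator object and its entry-point name are assumptions, and only the keys actually read in this excerpt are shown (the datastore and VM-name keys are read earlier in the method, outside the excerpt):

# Hypothetical call; "creator" and the method name are assumptions.
vm_options = {
    "resource_pool_name": "/pool1",         # optional, defaults to "/"
    "networks": ["VM Network"],             # optional, defaults to []
    "iso": "[datastore1] iso/install.iso",  # optional; False/absent = no CD-ROM
    "description": "Demo VM",               # optional
    "guestosid": "otherGuest",              # optional default
    "memory_size": 1024,                    # MB; falls back to DEFAULT_MEMORY_SIZE
    "cpu_count": 1,                         # falls back to DEFAULT_CPU_COUNT
    "disk_size": 1048576,                   # KB; 0 means no disk is created
}
creator.create_vm(vm_options)  # assumed entry point; vm_name is read elsewhere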
Example #35
0
# Add a CD-ROM device to the VM.
add_cdrom("iso", cd_iso_location)
add_nic("vmxnet3", network_name, "dvs")
#add_nic("vmxnet3", "VM Network", "standard")

config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)

# CREATE THE VM
taskmor = s._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, s)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
    raise Exception("Error creating vm: %s" % task.get_error_message())

# If there is any extra config options specified, set them here.
if extra_config:
    vm = s.get_vm_by_name(vmname)
    vm.set_extra_config(extra_config)

s.disconnect()
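Example #35 calls add_cdrom() and add_nic() helpers whose definitions are elided. A hedged sketch of what add_cdrom presumably does for the "iso" backing type, mirroring the CD-ROM block in Example #34; config, devices, ds, volume_name and ide_ctlr are assumed to be in scope:

def add_cdrom(backing_type, iso_location):
    # Hedged reconstruction -- not the original helper.
    cd_spec = config.new_deviceChange()
    cd_spec.set_element_operation("add")
    cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
    if backing_type == "iso":
        backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_backing").pyclass()
        ds_ref = backing.new_datastore(ds)
        ds_ref.set_attribute_type(ds.get_attribute_type())
        backing.set_element_datastore(ds_ref)
        backing.set_element_fileName("%s %s" % (volume_name, iso_location))
        cd_ctrl.set_element_backing(backing)
    cd_ctrl.set_element_key(20)
    cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
    cd_ctrl.set_element_unitNumber(0)
    cd_spec.set_element_device(cd_ctrl)
    devices.append(cd_spec)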
Example #36
0
def createGuest(host_con, guest_dc, guest_host, guest_name, guest_ver,
                guest_mem, guest_cpu, guest_iso, guest_os, guest_disk_gb,
                guest_ds, guest_network, guest_enterbios):
    #get dc MOR from list
    dc_list=[k for k,v in host_con.get_datacenters().items() if v==guest_dc]
    if dc_list:
        dc_mor=dc_list[0]
    else:
        host_con.disconnect()
        return "Cannot find dc: "+guest_dc
    dc_props=VIProperty(host_con, dc_mor)
    #get vmFolder
    vmf_mor = dc_props.vmFolder._obj
    #get hostfolder MOR
    hf_mor=dc_props.hostFolder._obj
    #get computer resources MORs
    cr_mors=host_con._retrieve_properties_traversal(property_names=['name','host'],from_node=hf_mor,obj_type='ComputeResource')
    #get host MOR
    try:
        host_mor=[k for k,v in host_con.get_hosts().items() if v==guest_host][0]
    except IndexError:
        host_con.disconnect()
        return "Cannot find host: "+guest_host
    #get computer resource MOR for host
    cr_mor=None
    for cr in cr_mors:
        if cr_mor:
            break
        for p in cr.PropSet:
            if p.Name=="host":
                for h in p.Val.get_element_ManagedObjectReference():
                    if h==host_mor:
                        cr_mor=cr.Obj
                        break
                if cr_mor:
                    break
    cr_props=VIProperty(host_con,cr_mor)
    #get resource pool MOR
    rp_mor=cr_props.resourcePool._obj

    #build guest properties
    #get config target
    request=VI.QueryConfigTargetRequestMsg()
    _this=request.new__this(cr_props.environmentBrowser._obj)
    _this.set_attribute_type(cr_props.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h=request.new_host(host_mor)
    h.set_attribute_type(host_mor.get_attribute_type())
    request.set_element_host(h)
    config_target=host_con._proxy.QueryConfigTarget(request)._returnval
    #get default devices
    request=VI.QueryConfigOptionRequestMsg()
    _this=request.new__this(cr_props.environmentBrowser._obj)
    _this.set_attribute_type(cr_props.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h=request.new_host(host_mor)
    h.set_attribute_type(host_mor.get_attribute_type())
    request.set_element_host(h)
    config_option=host_con._proxy.QueryConfigOption(request)._returnval
    default_devs=config_option.DefaultDevice
    #get network names
    if guest_network:
        net_name=guest_network
    else:
        # fall back to the last accessible network on the host
        net_name=None
        for net in config_target.Network:
            if net.Network.Accessible:
                net_name = net.Network.Name
    #get ds
    ds_target = None
    for d in config_target.Datastore:
        if d.Datastore.Accessible and ((guest_ds and d.Datastore.Name==guest_ds) or (not guest_ds)):
            ds_target=d.Datastore.Datastore
            guest_ds=d.Datastore.Name
            break
    if not ds_target:
        host_con.disconnect()
        return "Cannot find datastore: "+guest_ds
    ds_vol_name="[%s]" % guest_ds

    #create task request
    create_vm_request=VI.CreateVM_TaskRequestMsg()
    config=create_vm_request.new_config()
    #set location of vmx
    vm_files=config.new_files()
    vm_files.set_element_vmPathName(ds_vol_name)
    config.set_element_files(vm_files)
    if guest_enterbios:
        #set boot parameters
        vmboot=config.new_bootOptions()
        vmboot.set_element_enterBIOSSetup(True)
        config.set_element_bootOptions(vmboot)
    #set general parameters
    config.set_element_version(guest_ver)
    config.set_element_name(guest_name)
    config.set_element_memoryMB(guest_mem)
    config.set_element_memoryHotAddEnabled(True)
    config.set_element_numCPUs(guest_cpu)
    config.set_element_guestId(guest_os)
    config.set_element_cpuHotAddEnabled(True)

    #create devices
    devices = []
    #add controller to devices
    disk_ctrl_key=1
    scsi_ctrl_spec=config.new_deviceChange()
    scsi_ctrl_spec.set_element_operation('add')
    scsi_ctrl=VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
    scsi_ctrl.set_element_busNumber(0)
    scsi_ctrl.set_element_key(disk_ctrl_key)
    scsi_ctrl.set_element_sharedBus("noSharing")
    scsi_ctrl_spec.set_element_device(scsi_ctrl)
    devices.append(scsi_ctrl_spec)
    #find ide controller
    ide_ctlr = None
    for dev in default_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev
    #add cdrom
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation('add')
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
        cd_device_backing =VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
        ds_ref = cd_device_backing.new_datastore(ds_target)
        ds_ref.set_attribute_type(ds_target.get_attribute_type())
        cd_device_backing.set_element_datastore(ds_ref)
        cd_device_backing.set_element_fileName("%s %s" % (ds_vol_name,guest_iso))
        cd_ctrl.set_element_backing(cd_device_backing)
        cd_ctrl.set_element_key(20)
        cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
        cd_ctrl.set_element_unitNumber(0)
        cd_spec.set_element_device(cd_ctrl)
        devices.append(cd_spec)
    #add disk
    disk_spec=config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr=VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing=VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
    disk_backing.set_element_fileName(ds_vol_name)
    disk_backing.set_element_diskMode("persistent")
    disk_ctlr.set_element_key(0)
    disk_ctlr.set_element_controllerKey(disk_ctrl_key)
    disk_ctlr.set_element_unitNumber(0)
    disk_ctlr.set_element_backing(disk_backing)
    guest_disk_size=guest_disk_gb*1024*1024
    disk_ctlr.set_element_capacityInKB(guest_disk_size)
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)
    #add a network controller
    nic_spec = config.new_deviceChange()
    if net_name:
        nic_spec.set_element_operation("add")
        nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
        nic_backing.set_element_deviceName(net_name)
        nic_ctlr.set_element_addressType("generated")
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        nic_spec.set_element_device(nic_ctlr)
        devices.append(nic_spec)

    #create vm request
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    new_vmf_mor=create_vm_request.new__this(vmf_mor)
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
    new_rp_mor=create_vm_request.new_pool(rp_mor)
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type())
    new_host_mor=create_vm_request.new_host(host_mor)
    new_host_mor.set_attribute_type(host_mor.get_attribute_type())
    create_vm_request.set_element__this(new_vmf_mor)
    create_vm_request.set_element_pool(new_rp_mor)
    create_vm_request.set_element_host(new_host_mor)

    #finally actually create the guest :)
    task_mor=host_con._proxy.CreateVM_Task(create_vm_request)._returnval
    task=VITask(task_mor,host_con)
    task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])

    if task.get_state()==task.STATE_ERROR:
        return "Cannot create guest: "+task.get_error_message()
    else:
        return "Succesfully created guest: "+guest_name
Example #37
0
def change_dvs_net(s, vm_obj, hostname, dstlabel, curlabel):
    """Takes a VIServer and VIVirtualMachine object and reconfigures
    dVS portgroups according to the mappings in the pg_map dict. The
    pg_map dict must contain the source portgroup as key and the
    destination portgroup as value"""

    # Find virtual NIC devices
    pg_map = {}
    net_device = []

    if vm_obj:
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type in [
                    "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                    "VirtualVmxnet", "VirtualVmxnet2", "VirtualVmxnet3"
            ]:
                net_device.append(dev)
    if len(net_device) == 0:
        raise Exception("The vm seems to lack a Virtual Nic")

    # Lets get the information for the port group
    network_name = dstlabel
    network_name2 = curlabel

    # use the first datacenter on the server
    for ds_mor, name in s.get_datacenters().items():
        dcprops = VIProperty(s, ds_mor)
        break

    # networkFolder managed object reference
    nfmor = dcprops.networkFolder._obj
    dvpg_mors = s._retrieve_properties_traversal(
        property_names=['name', 'key'],
        from_node=nfmor,
        obj_type='DistributedVirtualPortgroup')

    # Get the portgroup managed object.
    dvpg_mor = None
    for dvpg in dvpg_mors:
        if dvpg_mor:
            break
        for p in dvpg.PropSet:
            if p.Name == "name" and p.Val == network_name:
                dvpg_mor = dvpg
            if dvpg_mor:
                break

    # Get the portgroup managed object.
    dvpg_mor2 = None
    for dvpg2 in dvpg_mors:
        if dvpg_mor2:
            break
        for p in dvpg2.PropSet:
            if p.Name == "name" and p.Val == network_name2:
                dvpg_mor2 = dvpg2
            if dvpg_mor2:
                break

    if dvpg_mor is None:
        print "Didn't find the dvpg %s, exiting now" % (network_name)
        exit()

    if dvpg_mor2 is None:
        print "Didn't find the dvpg %s, exiting now" % (network_name2)
        exit()

    # Get the portgroup key
    portgroupKey = None
    for p in dvpg_mor.PropSet:
        if p.Name == "key":
            portgroupKey = p.Val
    portgroupKey2 = None
    for p in dvpg_mor2.PropSet:
        if p.Name == "key":
            portgroupKey2 = p.Val

    # Use pg_map to set the new Portgroups
    pg_map[portgroupKey2] = portgroupKey
    for dev in net_device:
        old_portgroup = dev.backing.port.portgroupKey
        if old_portgroup in pg_map:
            dev.backing.port._obj.set_element_portgroupKey(
                pg_map[old_portgroup])
            dev.backing.port._obj.set_element_portKey('')

    # Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)

    # Build a single spec holding one device change per NIC
    spec = request.new_spec()
    devs_changed = []
    for dev in net_device:
        dev_change = spec.new_deviceChange()
        dev_change.set_element_device(dev._obj)
        dev_change.set_element_operation("edit")
        devs_changed.append(dev_change)

    # Submit the device change list
    spec.set_element_deviceChange(devs_changed)
    request.set_element_spec(spec)
    ret = s._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, s)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "SUCCESS --> VM '%s' successfully reconfigured!" % hostname
    elif status == task.STATE_ERROR:
        print "ERROR --> Something went wrong reconfiguring vm '%s'! %s" % (
            hostname, task.get_error_message())
    else:
        print "ERROR --> Unexpected task state for vm '%s'!" % hostname
Example #38
0
 def add_vnic(self, network):
     '''
     add_vnic adds a vNIC to the VM named in self.vm_json['name'].
     Arguments:
     network: the name of the dvS portgroup to attach the new vNIC to.
     **Note**
     VNICs added here are only tracked in a temporary dictionary
     (self.vm_vnics), so any vNIC that is not removed before this class
     instance goes away will have to be removed manually in vCenter.
     '''
     vm_name = self.vm_json.get('name')
     # datacentername = "Dev_Datacenter"
     # hostname = "esx14.mgt.hawaii.edu"
     network_name = network
     s = self.server
     dcmor = None
     datacenters = s.get_datacenters()
     logger.info('datacenters: %s', datacenters)
     # "GET INITIAL PROPERTIES AND OBJECTS"
     for i in datacenters:
         vm_list = s.get_registered_vms(i)  # list of vms
         for k in vm_list:
             if vm_name in k:
                 vm = k
                 dcmor = i
                 break
     if dcmor is None:
         logger.info("Datacenters: %s " % (datacenters))
         for k, v in datacenters.items():
             # When a VM is renamed, pysphere query to vcenter does not seem
             # to find the new vm name in any data center. So when we don't
             # find a vm in any data center, we assume it belongs in
             # Sunnyvale datacenter.
             # TODO(bindu): add an option to testbed file to pass this as
             # an option.
             if v == 'Sunnyvale':
                 logger.info('Datacenter for Sunnyvale %s' % k)
                 logger.info('Failed to find VM %s in any data center' %
                             vm_name)
                 logger.info(
                     '   VM might have been renamed. Assume datacenter Sunnyvale'
                 )
                 dcmor = k
                 break
         # raise Exception("Failed to find VM %s in any data center" % (vm_name))
     dcprops = VIProperty(s, dcmor)
     # "networkFolder managed object reference"
     nfmor = dcprops.networkFolder._obj
     dvpg_mors = s._retrieve_properties_traversal(
         property_names=['name', 'key'],
         from_node=nfmor,
         obj_type='DistributedVirtualPortgroup')
     # "get the portgroup managed object."
     dvpg_mor = None
     for dvpg in dvpg_mors:
         if dvpg_mor:
             break
         for p in dvpg.PropSet:
             if p.Name == "name" and p.Val == network_name:
                 dvpg_mor = dvpg
             if dvpg_mor:
                 break
     if dvpg_mor is None:
         raise Exception("Didn't find the dvpg %s, exiting now" %
                         (network_name))
     # "Get the portgroup key"
     portgroupKey = None
     for p in dvpg_mor.PropSet:
         if p.Name == "key":
             portgroupKey = p.Val
     # "Grab the dvswitch uuid and portgroup properties"
     dvswitch_mors = s._retrieve_properties_traversal(
         property_names=['uuid', 'portgroup'],
         from_node=nfmor,
         obj_type='DistributedVirtualSwitch')
     dvswitch_mor = None
     # "Get the appropriate dvswitches managed object"
     for dvswitch in dvswitch_mors:
         if dvswitch_mor:
             break
         for p in dvswitch.PropSet:
             if p.Name == "portgroup":
                 pg_mors = p.Val.ManagedObjectReference
                 for pg_mor in pg_mors:
                     if dvswitch_mor:
                         break
                     key_mor = s._get_object_properties(
                         pg_mor, property_names=['key'])
                     for key in key_mor.PropSet:
                         if key.Val == portgroupKey:
                             dvswitch_mor = dvswitch
     # Get the switches uuid
     dvswitch_uuid = None
     for p in dvswitch_mor.PropSet:
         if p.Name == "uuid":
             dvswitch_uuid = p.Val
     # create_vm_request = VI.CreateVM_TaskRequestMsg()
     # config = create_vm_request.new_config()
     vm_obj = s.get_vm_by_name(vm_name, self.datacenter)
     vm = vm_obj
     net_device_mac = []
     for dev in vm.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ]):
             # print dev.macAddress
             net_device_mac.append(dev.macAddress)
     vm_obj = s.get_vm_by_name(vm_name, self.datacenter)
     # Invoke ReconfigVM_Task
     request = VI.ReconfigVM_TaskRequestMsg()
     _this = request.new__this(vm_obj._mor)  # get the resource pool
     _this.set_attribute_type(vm_obj._mor.get_attribute_type())
     request.set_element__this(_this)
     spec = request.new_spec()
     # add a NIC. the network Name must be set as the device name.
     dev_change = spec.new_deviceChange()
     dev_change.set_element_operation("add")
     nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
     # nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
     # nic_backing.set_element_deviceName(label)
     nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
         "nic_backing_port").pyclass()
     nic_backing_port.set_element_switchUuid(dvswitch_uuid)
     nic_backing_port.set_element_portgroupKey(portgroupKey)
     nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
         "nic_backing").pyclass()
     nic_backing.set_element_port(nic_backing_port)
     # print inspect.getmembers(nic_backing)
     # nic_backing.deviceName == network
     nic_ctlr.set_element_addressType("generated")
     nic_ctlr.set_element_backing(nic_backing)
     nic_ctlr.set_element_key(4)
     dev_change.set_element_device(nic_ctlr)
     spec.set_element_deviceChange([dev_change])
     request.set_element_spec(spec)
     ret = self.server._proxy.ReconfigVM_Task(request)._returnval
     # net_device.Connectable.Connected = True
     # Wait for the task to finish
     task = VITask(ret, self.server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     vm = self.server.get_vm_by_name(vm_name, self.datacenter)
     for dev in vm.properties.config.hardware.device:
         if (dev._type in [
                 "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet3"
         ] and dev.macAddress not in net_device_mac):
             # print dev.macAddress
             self.vm_vnics[(vm_name, network)] = (dev.deviceInfo.label,
                                                  dev.macAddress)
     if status == task.STATE_SUCCESS:
         logger.info("vnic %s on vm %s successfully added" %
                     (dev.macAddress, vm_name))
      elif status == task.STATE_ERROR:
          logger.info("Error adding vnic to vm %s: %s" %
                      (vm_name, task.get_error_message()))
     return dev.macAddress
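add_vnic() above is a method on a wrapper class that is not shown. A hedged driver sketch follows; the wrapper class name and constructor are assumptions, and only the attributes the method actually uses (server, vm_json, datacenter, vm_vnics) are taken from the excerpt:

# Hypothetical wrapper; only the attribute names are grounded in the code above.
wrapper = VmManager(vm_json={"name": "demo-vm"})  # assumed class/constructor
mac = wrapper.add_vnic("dvPortgroup-01")          # dvS portgroup name
# wrapper.vm_vnics now maps (vm_name, network) -> (device label, MAC)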
Example #39
0
            for vm in vm_mors:
                if vm not in folder_children:
                    temp_mors.append(vm)
            vm_mors = temp_mors
            if len(vm_mors) == 0:
                viserver.disconnect()
                module.exit_json(changed=False)
        except Exception as e:
            viserver.disconnect()
            module.fail_json(msg=str(e))

        try:
            req = VI.MoveIntoFolder_TaskRequestMsg()
            req.set_element__this(folder_mor)
            req.set_element_list(vm_mors)
            task = VITask(viserver._proxy.MoveIntoFolder_Task(req).Returnval, viserver)
            task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

            if task.get_state() == task.STATE_ERROR:
                viserver.disconnect()
                module.fail_json(msg="Error moving vm: %s to folder %s. Error: %s" %
                                 (found_vms, json.dumps(folder_structure), task.get_error_message()))
            else:
                changed = True
        except Exception as e:
            viserver.disconnect()
            module.fail_json(msg="Error Requesting VM Move: %s for VM: %s" % (found_vms, json.dumps(folder_structure), str(e)))

    viserver.disconnect()
    module.exit_json(
        changed=changed,
Example #40
0
    raise Exception("The vm seems to lack a Virtual Nic")

#Set Nic macAddress to Manual and set address
net_device.set_element_addressType("Manual")
net_device.set_element_macAddress(new_mac)

#Invoke ReconfigVM_Task
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
dev_change = spec.new_deviceChange()
dev_change.set_element_device(net_device)
dev_change.set_element_operation("edit")
spec.set_element_deviceChange([dev_change])
request.set_element_spec(spec)
ret = s._proxy.ReconfigVM_Task(request)._returnval

#Wait for the task to finish
task = VITask(ret, s)

status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
    print "VM successfully reconfigured"
elif status == task.STATE_ERROR:
    print "Error reconfiguring vm:", task.get_error_message()

#Disconnect from the server
s.disconnect() 
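Example #40 begins mid-script: the code that obtained s, vm, net_device and new_mac is elided. A hedged reconstruction of that preamble, mirroring the NIC scan used in Example #37; all names and the MAC are placeholders:

from pysphere import VIServer
from pysphere.resources import VimService_services as VI

s = VIServer()
s.connect("esx.example.com", "user", "password")  # placeholders
vm = s.get_vm_by_name("demo-vm")
new_mac = "00:50:56:00:00:01"  # placeholder; VMware static MAC range
net_device = None
for dev in vm.properties.config.hardware.device:
    if dev._type in ["VirtualE1000", "VirtualPCNet32", "VirtualVmxnet3"]:
        net_device = dev._obj  # raw object, needed for set_element_* calls
        break
if not net_device:
    raise Exception("The vm seems to lack a Virtual Nic")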
Example #41
0
    #create vm request
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    new_vmf_mor = create_vm_request.new__this(vmf_mor)
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type())
    new_rp_mor = create_vm_request.new_pool(rp_mor)
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type())
    new_host_mor = create_vm_request.new_host(host_mor)
    new_host_mor.set_attribute_type(host_mor.get_attribute_type())
    create_vm_request.set_element__this(new_vmf_mor)
    create_vm_request.set_element_pool(new_rp_mor)
    create_vm_request.set_element_host(new_host_mor)

    #finally actually create the guest :)
    task_mor = host_con._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(task_mor, host_con)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

    if task.get_state() == task.STATE_ERROR:
        return "Cannot create guest: " + task.get_error_message()
    else:
        return "Succesfully created guest: " + guest_name


def getMac(host_con, guest_name):
    vm = host_con.get_vm_by_name(guest_name)
    net = vm.get_property('net', from_cache=False)
    if net:
        for interface in net:
            mac = interface.get('mac_address', None)
            if mac:
                return mac
Example #42
0
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    folder_mor = create_vm_request.new__this(vmfmor)
    folder_mor.set_attribute_type(vmfmor.get_attribute_type())
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, vsphere_client)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if task.get_state() == task.STATE_ERROR:
        vsphere_client.disconnect()
        module.fail_json(msg="Error creating vm: %s" %
                         task.get_error_message())
    else:
        # We always need to get the vm because we are going to gather facts
        vm = vsphere_client.get_vm_by_name(guest)

        # VM was created. If there is any extra config options specified, set
        # them here , disconnect from vcenter, then exit.
        if vm_extra_config:
            vm.set_extra_config(vm_extra_config)

        # Power on the VM if it was requested
Example #43
0
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool,
                   cluster_name, guest, vm_extra_config, vm_hardware,
                   vm_disk, vm_nic, state, force):
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False
    poweron = vm.is_powered_on()

    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    # Change Memory
    if 'memory_mb' in vm_hardware:

        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not memoryHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not memoryHotAddEnabled:
                        module.fail_json(
                            msg="memoryHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and memory shrink
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        module.fail_json(
                            msg="Cannot lower memory on a live VM. force is "
                            "required for shutdown")

            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']

    # ====( Config CPU )====#
    if 'num_cpus' in vm_hardware:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not cpuHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not cpuHotAddEnabled:
                        module.fail_json(
                            msg="cpuHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and cpu shrink without hot remove
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            module.fail_json(
                                msg="Cannot lower CPU on a live VM without "
                                "cpuHotRemove. force is required for shutdown")

            spec.set_element_numCPUs(int(vm_hardware['num_cpus']))

            changes['cpu'] = vm_hardware['num_cpus']

    if len(changes):

        if shutdown and vm.is_powered_on():
            try:
                vm.power_off(sync_run=True)
                vm.get_status()

            except Exception, e:
                module.fail_json(
                    msg='Failed to shutdown vm %s: %s' % (guest, e)
                )

        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            changed = True
        elif status == task.STATE_ERROR:
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())

        if vm.is_powered_off() and poweron:
            try:
                vm.power_on(sync_run=True)
            except Exception, e:
                module.fail_json(
                    msg='Failed to power on vm %s : %s' % (guest, e)
                )
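Both reconfigure_vm variants (here and in Example #44) call a spec_singleton() helper that never appears in these excerpts. A minimal sketch consistent with its call sites: build the config spec on first use, then keep returning the same one so all changes accumulate in a single spec:

def spec_singleton(spec, request, vm):
    # Hedged sketch: only create a new spec if none exists yet.
    if not spec:
        spec = request.new_spec()
    return spec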
Example #44
0
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool,
                   cluster_name, guest, vm_extra_config, vm_hardware,
                   vm_disk, vm_nic, state, force):
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False

    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    # Change Memory
    if vm_hardware['memory_mb']:

        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not memoryHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not memoryHotAddEnabled:
                        module.fail_json(
                            msg="memoryHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and memory shrink
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        module.fail_json(
                            msg="Cannot lower memory on a live VM. force is "
                            "required for shutdown")

            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']

    # ====( Config CPU )====#
    if vm_hardware['num_cpus']:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)

            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not cpuHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not cpuHotAddEnabled:
                        module.fail_json(
                            msg="cpuHotAdd is not enabled. force is "
                            "required for shutdown")

                    # Fail on no force and cpu shrink without hot remove
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            module.fail_json(
                                msg="Cannot lower CPU on a live VM without "
                                "cpuHotRemove. force is required for shutdown")

            spec.set_element_numCPUs(int(vm_hardware['num_cpus']))

            changes['cpu'] = vm_hardware['num_cpus']

    if len(changes):

        if shutdown and vm.is_powered_on():
            try:
                vm.power_off(sync_run=True)
                vm.get_status()

            except Exception, e:
                module.fail_json(
                    msg='Failed to shutdown vm %s: %s' % (guest, e)
                )

        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            changed = True
        elif status == task.STATE_ERROR:
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())

        if vm.is_powered_off():
            try:
                vm.power_on(sync_run=True)
            except Exception, e:
                module.fail_json(
                    msg='Failed to power on vm %s : %s' % (guest, e)
                )
Example #45
0
    #create vm request
    config.set_element_deviceChange(devices) 
    create_vm_request.set_element_config(config)
    new_vmf_mor=create_vm_request.new__this(vmf_mor) 
    new_vmf_mor.set_attribute_type(vmf_mor.get_attribute_type()) 
    new_rp_mor=create_vm_request.new_pool(rp_mor) 
    new_rp_mor.set_attribute_type(rp_mor.get_attribute_type()) 
    new_host_mor=create_vm_request.new_host(host_mor) 
    new_host_mor.set_attribute_type(host_mor.get_attribute_type()) 
    create_vm_request.set_element__this(new_vmf_mor) 
    create_vm_request.set_element_pool(new_rp_mor) 
    create_vm_request.set_element_host(new_host_mor) 
    
    #finally actually create the guest :)
    task_mor=host_con._proxy.CreateVM_Task(create_vm_request)._returnval 
    task=VITask(task_mor,host_con) 
    task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR]) 
    
    if task.get_state()==task.STATE_ERROR: 
        return "Cannot create guest: "+task.get_error_message()
    else:
        return "Succesfully created guest: "+guest_name

def getMac(host_con,guest_name):
    vm=host_con.get_vm_by_name(guest_name)
    net = vm.get_property('net', from_cache=False)
    if net:
        for interface in net:
            mac = interface.get('mac_address', None)
            if mac:
                return mac
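A hedged usage sketch for getMac(); it returns None until the guest actually reports a mac_address for one of its interfaces:

mac = getMac(host_con, "demo-vm")  # host_con: a connected VIServer
if mac:
    print "MAC address:", mac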
Example #46
0
def create_vm():
    opts = options()

    # CONNECTION PARAMETERS
    server = opts.esx_host
    user = opts.user
    password = opts.passwd

    # REQUIRED PARAMETERS
    vmname = opts.name
    # datacentername = "ha-datacenter"
    datacentername = opts.datacenter
    hostname = opts.hostname
    annotation = "My Product Product Virtual Machine"
    memorysize = opts.ram
    cpucount = opts.cpus
    # cd_iso_location =
    # "iso/My_Product_2013_02_26_05_15_00.iso"
    # # located in the ESX datastore
    cd_iso_location = opts.iso
    guestosid = "centos64Guest"
    # find your os in
    # http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
    disksize = (1024 ** 2) * 100  # In KB: 1024 ** 2 KB = 1 GB; 1 GB * 100 = 100 GB.

    # OPTIONAL PARAMETERS

    datastorename = opts.datastore  # if None, will use the first datastore available

    # CONNECT TO THE SERVER
    s = VIServer()
    s.connect(server, user, password)

    # GET INITIAL PROPERTIES AND OBJECTS

    # get datacenter
    dcmor = s._get_datacenters()[datacentername]
    dcprops = VIProperty(s, dcmor)
    # get host folder
    hfmor = dcprops.hostFolder._obj

    # get computer resources
    crmors = s._retrieve_properties_traversal(property_names=['name',
                                                              'host'], from_node=hfmor, obj_type='ComputeResource')

    # get host
    hostmor = None
    for mor, name in s.get_hosts().items():
        if name == hostname:
            hostmor = mor
            break

    # get computer resource of this host
    crmor = None
    for cr in crmors:
        if crmor:
            break
        for p in cr.PropSet:
            # print 'p.Name:', p.Name
            if p.Name == "host":
                for h in p.Val.get_element_ManagedObjectReference():
                    if h == hostmor:
                        crmor = cr.Obj
                        break
                if crmor:
                    break
    crprops = VIProperty(s, crmor)

    # get resource pool
    rpmor = crprops.resourcePool._obj

    # get vmFolder
    vmfmor = dcprops.vmFolder._obj

    # CREATE VM CONFIGURATION

    # get config target
    request = VI.QueryConfigTargetRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_target = s._proxy.QueryConfigTarget(request)._returnval

    # get default devices
    request = VI.QueryConfigOptionRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_option = s._proxy.QueryConfigOption(request)._returnval
    default_devs = config_option.DefaultDevice

    # get network name
    # would be assigned to the last known working network interface.
    # in this case, it would be VM Network 2.
    network_name = None
    for n in config_target.Network:
        if n.Network.Accessible:
            network_name = n.Network.Name

    # can hard-code it as 'VM Network'

    # get datastore
    # Just verifies that the datastorename mentioned at the top matches with the
    # available list of datastores.
    ds = None
    for d in config_target.Datastore:
        if d.Datastore.Accessible and ((datastorename and d.Datastore.Name
                                        == datastorename) or (not datastorename)):
            ds = d.Datastore.Datastore
            datastorename = d.Datastore.Name
            break
    if not ds:
        raise Exception("couldn't find datastore")
    volume_name = "[%s]" % datastorename

    # add parameters to the create vm task
    create_vm_request = VI.CreateVM_TaskRequestMsg()
    config = create_vm_request.new_config()
    vmfiles = config.new_files()
    vmfiles.set_element_vmPathName(volume_name)
    config.set_element_files(vmfiles)
    config.set_element_name(vmname)
    config.set_element_annotation(annotation)
    config.set_element_memoryMB(memorysize)
    config.set_element_numCPUs(cpucount)
    config.set_element_guestId(guestosid)
    devices = []

    # add a scsi controller
    disk_ctrl_key = 1
    scsi_ctrl_spec = config.new_deviceChange()
    scsi_ctrl_spec.set_element_operation('add')
    scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
    scsi_ctrl.set_element_busNumber(0)
    scsi_ctrl.set_element_key(disk_ctrl_key)
    scsi_ctrl.set_element_sharedBus("noSharing")

    scsi_ctrl_spec.set_element_device(scsi_ctrl)
    devices.append(scsi_ctrl_spec)

    # find ide controller
    ide_ctlr = None
    for dev in default_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev

    # add a cdrom based on a physical device
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation('add')
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
        cd_device_backing = VI.ns0.VirtualCdromIsoBackingInfo_Def("cd_device_backing").pyclass()
        ds_ref = cd_device_backing.new_datastore(ds)
        ds_ref.set_attribute_type(ds.get_attribute_type())
        cd_device_backing.set_element_datastore(ds_ref)
        cd_device_backing.set_element_fileName("%s %s" % (volume_name,
                                                          cd_iso_location))
        cd_ctrl.set_element_backing(cd_device_backing)
        cd_ctrl.set_element_key(20)
        cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
        cd_ctrl.set_element_unitNumber(0)
        cd_spec.set_element_device(cd_ctrl)
        devices.append(cd_spec)

    # create a new disk - file based - for the vm
    disk_spec = config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
    disk_backing.set_element_fileName(volume_name)
    disk_backing.set_element_diskMode("persistent")
    disk_backing.ThinProvisioned = True
    disk_ctlr.set_element_key(0)
    disk_ctlr.set_element_controllerKey(disk_ctrl_key)
    disk_ctlr.set_element_unitNumber(0)
    disk_ctlr.set_element_backing(disk_backing)
    disk_ctlr.set_element_capacityInKB(disksize)
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)

    # add a NIC. the network Name must be set as the device name to create the NIC.
    nic_spec = config.new_deviceChange()
    if network_name:
        nic_spec.set_element_operation("add")
        nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
        nic_backing.set_element_deviceName(network_name)
        nic_ctlr.set_element_addressType("generated")
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        nic_spec.set_element_device(nic_ctlr)
        devices.append(nic_spec)

    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    folder_mor = create_vm_request.new__this(vmfmor)
    folder_mor.set_attribute_type(vmfmor.get_attribute_type())
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)

    # CREATE THE VM
    taskmor = s._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, s)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

    if task.get_state() == task.STATE_ERROR:
        raise Exception("Error creating vm: %s" %
                        task.get_error_message())

    # Here you should power your VM (refer to the pysphere documentation)
    # So it boots from the specified ISO location
    try:
        new_vm = s.get_vm_by_name(opts.name)
        connect_vm_cdroms(new_vm, s)
        try:
            new_vm.power_on()
        except Exception as e:
            print "Failed to power-on the new VM using:", opts.name
            print "Exception:", str(e)
    except Exception as e:
        print "Failed to locate the new VM using:", opts.name
        print "Exception:", str(e)
    # disconnect from the server
    s.disconnect()
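Example #46's options() helper is elided; judging by the attributes read at the top of create_vm(), it parses esx_host, user, passwd, name, datacenter, hostname, ram, cpus, iso and datastore. A hedged entry point:

if __name__ == "__main__":
    # options() is assumed to return an object exposing the attributes
    # listed above; see the hedge in the lead-in.
    create_vm()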