コード例 #1
0
ファイル: vmware.py プロジェクト: pombredanne/simplestack
def enable_vmi(server, vm_obj):
    """Attach a VMI ROM paravirtualization device (key 11000) to *vm_obj*.

    Raises HypervisorError if the ReconfigVM task does not succeed.
    """
    req = VI.ReconfigVM_TaskRequestMsg()
    managed_ref = req.new__this(vm_obj._mor)
    managed_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    req.set_element__this(managed_ref)

    config_spec = req.new_spec()

    # Build the single device-change entry: add a VMI ROM at key 11000.
    vmi_rom = VirtualMachineVMIROM()
    vmi_rom.set_element_key(11000)
    change = config_spec.new_deviceChange()
    change.set_element_operation('add')
    change.set_element_device(vmi_rom)
    config_spec.set_element_deviceChange([change])

    req.set_element_spec(config_spec)
    task_mor = server._proxy.ReconfigVM_Task(req)._returnval

    # Block until the reconfigure task reaches a terminal state.
    task = VITask(task_mor, server)
    final_state = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if final_state != task.STATE_SUCCESS:
        raise HypervisorError("Guest:update %s" % task.get_error_message())
コード例 #2
0
def ReconfigureVM(vm, cdrom):
    # create a new VM Reconfigure Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)

    # new device change spec
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_device(cdrom)
    dev_change.set_element_operation("edit")

    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "%s: successfully reconfigured" % vm.properties.name
        return True
    elif status == task.STATE_ERROR:
        print "%s: Error reconfiguring vm"% vm.properties.name
        return False
コード例 #3
0
def reconfig(server, vm, cdrom):
    """Edit *cdrom* on *vm* through ReconfigVM_Task.

    Returns True when the task succeeds, False otherwise.
    """
    req = VI.ReconfigVM_TaskRequestMsg()
    ref = req.new__this(vm._mor)
    ref.set_attribute_type(vm._mor.get_attribute_type())
    req.set_element__this(ref)

    # Single 'edit' device change carrying the CD-ROM device.
    cfg = req.new_spec()
    entry = cfg.new_deviceChange()
    entry.set_element_operation('edit')
    entry.set_element_device(cdrom)
    cfg.set_element_deviceChange([entry])
    req.set_element_spec(cfg)

    # Submit and wait for a terminal state.
    task = VITask(server._proxy.ReconfigVM_Task(req)._returnval, server)
    state = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    return state == task.STATE_SUCCESS
コード例 #4
0
ファイル: vmware.py プロジェクト: LuizOz/simplestack
def enable_vmi(server, vm_obj):
    """Add a VMI ROM device (key 11000) to *vm_obj*.

    Raises HypervisorError when the reconfigure task fails.
    """
    req = VI.ReconfigVM_TaskRequestMsg()
    this_ref = req.new__this(vm_obj._mor)
    this_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    req.set_element__this(this_ref)

    cfg = req.new_spec()

    # The VMI ROM device to add, with its fixed key.
    rom = VirtualMachineVMIROM()
    rom.set_element_key(11000)

    add_change = cfg.new_deviceChange()
    add_change.set_element_operation('add')
    add_change.set_element_device(rom)

    changes = [add_change]
    cfg.set_element_deviceChange(changes)
    req.set_element_spec(cfg)

    # Submit and wait for the reconfigure task to settle.
    task = VITask(server._proxy.ReconfigVM_Task(req)._returnval, server)
    if task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) != task.STATE_SUCCESS:
        raise HypervisorError("Guest:update %s" % task.get_error_message())
コード例 #5
0
    def add_vdisk(self, vminstance, datastore_name, vdisk_size_gb):
        '''add disk'''
        vdisk_size_kb = vdisk_size_gb * 1024 * 1024
        vm = vminstance

        # Virtual device node number
        Unit_Number = ""
        Temp_Number = 1
        # find the device to be removed
        while True:
            dev = [
                dev for dev in vm.properties.config.hardware.device
                if dev._type == "VirtualDisk" and dev.unitNumber == Temp_Number
            ]
            if len(dev) == 0:
                Unit_Number = Temp_Number
                break
            else:
                Temp_Number += 1
                continue

        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)

        spec = request.new_spec()

        dc = spec.new_deviceChange()
        dc.Operation = "add"
        dc.FileOperation = "create"

        hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
        hd.Key = -100
        hd.UnitNumber = Unit_Number
        hd.CapacityInKB = vdisk_size_kb
        hd.ControllerKey = 1000
        backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
            "backing").pyclass()
        backing.FileName = "[%s]" % datastore_name
        backing.DiskMode = "persistent"
        backing.Split = False
        backing.WriteThrough = False
        backing.ThinProvisioned = False
        backing.EagerlyScrub = False
        hd.Backing = backing
        dc.Device = hd
        spec.DeviceChange = [dc]
        request.Spec = spec
        task = self.s._proxy.ReconfigVM_Task(request)._returnval
        vi_task = VITask(task, self.s)
        # Wait for task to finis
        status = vi_task.wait_for_state(
            [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            print "ERROR: %s" % (vi_task.STATE_ERROR)
            return False
        else:
            return True
        self.s.disconnect()
コード例 #6
0
ファイル: cpu_helper.py プロジェクト: bradbann/vCenterAPI
def change_cpu(vminstance=None, cpu_number=None, cpu_core=None):
    '''Modify the number and core number of the virtual machine CPU'''
    vm_obj = vminstance
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    if cpu_number is not None:
        spec.set_element_numCPUs(cpu_number)  # This sets CPU config to 2x1 (The two cpus are single-core)
    if cpu_core is not None:
        spec.set_element_numCoresPerSocket(cpu_core)  # This sets CPU config to 1x2 (instead of 2x2) 1 CPU dual-core

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully reconfigured"
        return True
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm: %s" % task.get_error_message()
        return False
    server.disconnect()
コード例 #7
0
def set_vm_reservation(server, types, vm_name, reservation, level):
    '''Reconfigure a VM's CPU count or memory size.

    types: 'cpu' sets numCPUs, 'memory' sets memoryMB; any other value
        submits an empty spec (no-op reconfigure).
    reservation: the new value for the chosen setting.
    level: unused, kept for interface compatibility.
    Returns True when the task succeeds, False otherwise.

    Fix: the old code assembled human-readable messages into `ret` that were
    never returned, printed, or logged (dead stores), alongside a block of
    commented-out debug prints; both removed.
    '''
    vm_mor = server.get_vm_by_name(vm_name)
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_mor._mor)
    _this.set_attribute_type(vm_mor._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    if types == 'cpu':
        spec.set_element_numCPUs(reservation)
    elif types == 'memory':
        spec.set_element_memoryMB(reservation)

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    return status == task.STATE_SUCCESS
コード例 #8
0
def disconnect_nic_from_network(vCenterserver,
                                username,
                                password,
                                datacentername,
                                vmname,
                                dvswitch_uuid,
                                portgroupKey,
                                network_name="VM Network",
                                nic_type="vmxnet3",
                                network_type="standard"):

    con = vs_connect(vCenterserver, username, password)
    vm_obj = con.get_vm_by_name(vmname, datacenter=datacentername)

    #Disconnect 3rd adaptar if its connected to network "VM Network"
    #network_name = "VM Network"
    device_name = "Network adapter 3"

    #Find Virtual Nic device
    net_device = None
    for dev in vmname.properties.config.hardware.device:
        if (dev._type in [
                "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"
        ] and dev.deviceInfo.label == network_name
                and dev.deviceInfo.summary == device_name):
            net_device = dev._obj
            break

    if not net_device:
        s.disconnect()
        raise Exception("The vm seems to lack a Virtual Nic")

    #Disconnect the device
    net_device.Connectable.Connected = True

    #Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vmname._mor)
    _this.set_attribute_type(vmname._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_device(net_device)
    dev_change.set_element_operation("edit")
    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = s._proxy.ReconfigVM_Task(request)._returnval

    #Wait for the task to finish
    task = VITask(ret, s)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm:", task.get_error_message()

    s.disconnect()
コード例 #9
0
ファイル: esxi.py プロジェクト: matelakat/ditty-box
 def perform_reconfig(self, request):
     ret = self.esxi_server._proxy.ReconfigVM_Task(request)._returnval
     task = VITask(ret, self.esxi_server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         print "VM successfully reconfigured"
     elif status == task.STATE_ERROR:
         print "Error reconfiguring vm: %s" % task.get_error_message()
コード例 #10
0
def get_file_list(datastore,
                  path="/",
                  case_insensitive=True,
                  folders_first=True,
                  match_patterns=None):
    '''List files on a datastore path via SearchDatastore_Task.

    datastore: datastore display name (resolved to its MOR via the global
        connection `s`).
    path: folder path within the datastore.
    match_patterns: optional list of glob patterns to filter results.
    Returns a list of {'path': ...} dicts; empty list if nothing matched.
    Raises Exception with the task error message on failure.

    Fix: match_patterns previously defaulted to a shared mutable list ([]);
    it now defaults to None and a fresh list per call.
    '''
    if match_patterns is None:
        match_patterns = []

    ds = [k for k, v in s.get_datastores().items() if v == datastore][0]
    ds_browser = VIProperty(s, ds).browser._obj

    request = VI.SearchDatastore_TaskRequestMsg()
    _this = request.new__this(ds_browser)
    _this.set_attribute_type(ds_browser.get_attribute_type())
    request.set_element__this(_this)
    request.set_element_datastorePath("[%s] %s" % (datastore, path))

    search_spec = request.new_searchSpec()

    # Ask for every known file kind so nothing on the datastore is skipped.
    query = [
        VI.ns0.FloppyImageFileQuery_Def('floppy').pyclass(),
        VI.ns0.FolderFileQuery_Def('folder').pyclass(),
        VI.ns0.IsoImageFileQuery_Def('iso').pyclass(),
        VI.ns0.VmConfigFileQuery_Def('vm').pyclass(),
        VI.ns0.TemplateConfigFileQuery_Def('template').pyclass(),
        VI.ns0.VmDiskFileQuery_Def('vm_disk').pyclass(),
        VI.ns0.VmLogFileQuery_Def('vm_log').pyclass(),
        VI.ns0.VmNvramFileQuery_Def('vm_ram').pyclass(),
        VI.ns0.VmSnapshotFileQuery_Def('vm_snapshot').pyclass()
    ]
    search_spec.set_element_query(query)
    details = search_spec.new_details()
    details.set_element_fileOwner(True)
    details.set_element_fileSize(True)
    details.set_element_fileType(True)
    details.set_element_modification(True)
    search_spec.set_element_details(details)
    search_spec.set_element_searchCaseInsensitive(case_insensitive)
    search_spec.set_element_sortFoldersFirst(folders_first)
    search_spec.set_element_matchPattern(match_patterns)
    request.set_element_searchSpec(search_spec)
    response = s._proxy.SearchDatastore_Task(request)._returnval
    task = VITask(response, s)
    if task.wait_for_state([task.STATE_ERROR,
                            task.STATE_SUCCESS]) == task.STATE_ERROR:
        raise Exception(task.get_error_message())

    info = task.get_result()

    # A search with no hits returns a result object with no 'file' attribute.
    if not hasattr(info, "file"):
        return []
    return [{'path': fi.path} for fi in info.file]
コード例 #11
0
def connect_publicNIC_to_publicNet(vCenterserver, username, password,
                                   datacentername, vm_name, network_name,
                                   netlabel):
    """Point the NIC labelled *netlabel* on *vm_name* at *network_name* and
    mark it connected, then submit the reconfigure task.

    Returns a human-readable success or failure string; raises Exception if
    the VM or a matching NIC cannot be found.
    """
    con = vs_connect(vCenterserver, username, password)
    vm_obj = con.get_vm_by_name(vm_name, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM %s not found" % vm_name)

    nic_types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"]

    # Scan all hardware; the last matching NIC wins (no break), matching the
    # original behaviour.
    net_device = None
    for dev in vm_obj.properties.config.hardware.device:
        if dev._type in nic_types and hasattr(dev, "deviceInfo") \
                and dev.deviceInfo.label == netlabel:
            net_device = dev._obj
    if not net_device:
        raise Exception("The vm_name seems to lack a Virtual Nic")

    if hasattr(net_device.Backing, "DeviceName"):
        # Standard vSwitch backing: retarget by network name.
        net_device.Connectable.Connected = True
        net_device.Backing.set_element_deviceName(network_name)

    if hasattr(net_device.Backing, "Port"):
        # Distributed-switch backing: only mark connected.
        # TODO convert device backing
        net_device.Connectable.Connected = True

    req = VI.ReconfigVM_TaskRequestMsg()
    this_ref = req.new__this(vm_obj._mor)
    this_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    req.set_element__this(this_ref)
    cfg = req.new_spec()
    change = cfg.new_deviceChange()
    change.set_element_operation("edit")
    change.set_element_device(net_device)
    cfg.set_element_deviceChange([change])
    req.set_element_spec(cfg)

    task = VITask(con._proxy.ReconfigVM_Task(req)._returnval, con)

    # Wait for the task to finish.
    state = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if state == task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif state == task.STATE_ERROR:
        return "failure reconfiguring vm_name: " + str(
            task.get_error_message())
コード例 #12
0
ファイル: select_network.py プロジェクト: morphizer/misc
def change_dvs_net(server, vm_obj, pg_map):
    """Takes a VIServer and VIVirtualMachine object and reconfigures
    dVS portgroups according to the mappings in the pg_map dict. The
    pg_map dict must contain the source portgroup as key and the
    destination portgroup as value"""
    # Find virtual NIC devices
    if vm_obj:
        net_device = []
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type in ["VirtualE1000", "VirtualE1000e",
                            "VirtualPCNet32", "VirtualVmxnet",
                            "VirtualNmxnet2", "VirtualVmxnet3"]:
                net_device.append(dev)

    # Throw an exception if there is no NIC found
    if len(net_device) == 0:
        raise Exception("The vm seems to lack a Virtual Nic")

    # Use pg_map to set the new Portgroups
    for dev in net_device:
        old_portgroup = dev.backing.port.portgroupKey
        if pg_map.has_key(old_portgroup):
            dev.backing.port._obj.set_element_portgroupKey(
                pg_map[old_portgroup])
            dev.backing.port._obj.set_element_portKey('')

    # Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)

    # Build a list of device change spec objects
    devs_changed = []
    for dev in net_device:
        spec = request.new_spec()
        dev_change = spec.new_deviceChange()
        dev_change.set_element_device(dev._obj)
        dev_change.set_element_operation("edit")
        devs_changed.append(dev_change)

    # Submit the device change list
    spec.set_element_deviceChange(devs_changed)
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, server)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM %s successfully reconfigured" % vm_obj
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm: %s" % vm_obj, task.get_error_message()
    else:
        print "VM %s not found" % vm_obj
コード例 #13
0
 def _destroy_host(self, hostname):
     """Destroy the HostSystem whose display name equals *hostname*.

     Raises StopIteration if no host with that name exists.
     """
     req = VI.Destroy_TaskRequestMsg()
     host_mor = next(key for key, value in self.api.get_hosts().items()
                     if value == hostname)
     sys = VIMor(host_mor, 'HostSystem')
     ref = req.new__this(sys)
     ref.set_attribute_type(sys.get_attribute_type())
     req.set_element__this(ref)
     task_mor = self.api._proxy.Destroy_Task(req)._returnval
     t = VITask(task_mor, self.api)
     wait_for(lambda: 'success' in t.get_state())
コード例 #14
0
ファイル: chain_gun.py プロジェクト: mooperd/chain_gun
def spawn_esx_vm(con, template, hostname, mac ):
	# Here we fetch the vm by its name #
	template_vm = find_vm(con, template)
	# template_vm = con.get_vm_by_name(template)
	print 'template vm is %s' %template_vm
	print 'new vm is %s' %hostname
	print 'mac is %s' %mac
	print ('Trying to clone %s to VM %s' % (template_vm,hostname))
	print template_vm
	print ('================================================================================')
	# Does the VM already exist? #
	if find_vm(con, hostname):
                print 'ERROR: %s already exists' % hostname
	else:
		clone = template_vm.clone(hostname, True, None, None, None, None, False)
		print ('VM %s created' % (hostname))

	# And now we need to change its MAC address. We expect to find two Vmxnet3 devices#
	interfaces = []
	macs = []
	# Query network interfaces from vCenter and put them into a list called "interfaces"
	for dev in clone.properties.config.hardware.device:
		if dev._type in ["VirtualVmxnet3"]:
			interfaces.append(dev._obj)

	#Put the mac addresses into a list.
	macs.append(mac)
	
	#Cycle through the interfaces.
	for interface, mac in zip(interfaces, macs):
		print interface
		interface.set_element_addressType("Manual")
		interface.set_element_macAddress(mac)

		#Invoke ReconfigVM_Task 
		request = VI.ReconfigVM_TaskRequestMsg()
		_this = request.new__this(clone._mor)
		_this.set_attribute_type(clone._mor.get_attribute_type())
		request.set_element__this(_this)
		spec = request.new_spec()
		dev_change = spec.new_deviceChange()
		dev_change.set_element_device(interface)
		dev_change.set_element_operation("edit")
		spec.set_element_deviceChange([dev_change])
		request.set_element_spec(spec)
		ret = con._proxy.ReconfigVM_Task(request)._returnval

		#Wait for the task to finish 
		task = VITask(ret, con)

		status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
		if status == task.STATE_SUCCESS:
		    print "VM successfully reconfigured"
		elif status == task.STATE_ERROR:
		    print "Error reconfiguring vm:", task.get_error_message()
コード例 #15
0
ファイル: vmware.py プロジェクト: LuizOz/simplestack
def revert_to_snapshot(server, vm_obj, snapshot_obj):
    """Revert to *snapshot_obj* (vm_obj is unused, kept for the caller API).

    Raises HypervisorError when the task fails.
    """
    req = VI.RevertToSnapshot_TaskRequestMsg()
    snap_ref = req.new__this(snapshot_obj._mor)
    snap_ref.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    req.set_element__this(snap_ref)

    task = VITask(server._proxy.RevertToSnapshot_Task(req)._returnval, server)
    if task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) != task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:revert %s" % task.get_error_message())
コード例 #16
0
ファイル: vmware.py プロジェクト: pombredanne/simplestack
def revert_to_snapshot(server, vm_obj, snapshot_obj):
    """Roll the VM back to the given snapshot, raising HypervisorError on failure."""
    request = VI.RevertToSnapshot_TaskRequestMsg()
    target = request.new__this(snapshot_obj._mor)
    target.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    request.set_element__this(target)
    task_ref = server._proxy.RevertToSnapshot_Task(request)._returnval
    task = VITask(task_ref, server)

    succeeded = task.wait_for_state(
        [task.STATE_SUCCESS, task.STATE_ERROR]) == task.STATE_SUCCESS
    if not succeeded:
        raise HypervisorError("Snapshot:revert %s" % task.get_error_message())
コード例 #17
0
ファイル: vmware.py プロジェクト: pombredanne/simplestack
def delete_snapshot(server, vm_obj, snapshot_obj, remove_children=False):
    """Remove *snapshot_obj* (optionally with children) and refresh the VM's list.

    Raises HypervisorError when the task fails.
    """
    req = VI.RemoveSnapshot_TaskRequestMsg()
    snap_ref = req.new__this(snapshot_obj._mor)
    snap_ref.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    req.set_element__this(snap_ref)
    req.set_element_removeChildren(remove_children)

    task = VITask(server._proxy.RemoveSnapshot_Task(req)._returnval, server)
    outcome = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    # Refresh regardless of outcome, matching the original ordering.
    vm_obj.refresh_snapshot_list()
    if outcome != task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:delete %s" % task.get_error_message())
コード例 #18
0
def remove_vm(vm_name):
    vm = server.get_vm_by_name(vm_name)
    request = VI.Destroy_TaskRequestMsg()
    tiger = request.new__this(vm._mor)
    tiger.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(tiger)
    ret = server._proxy.Destroy_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully deleted from disk"
    elif status == task.STATE_ERROR:
        print "Error removing vm:", task.get_error_message()
コード例 #19
0
ファイル: vmware.py プロジェクト: LuizOz/simplestack
def delete_snapshot(server, vm_obj, snapshot_obj, remove_children=False):
    """Delete a snapshot from *vm_obj*, raising HypervisorError if the task fails."""
    request = VI.RemoveSnapshot_TaskRequestMsg()
    handle = request.new__this(snapshot_obj._mor)
    handle.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    request.set_element__this(handle)
    request.set_element_removeChildren(remove_children)
    task = VITask(server._proxy.RemoveSnapshot_Task(request)._returnval,
                  server)

    failed = task.wait_for_state(
        [task.STATE_SUCCESS, task.STATE_ERROR]) != task.STATE_SUCCESS
    # The snapshot list is refreshed whether or not the task succeeded.
    vm_obj.refresh_snapshot_list()
    if failed:
        raise HypervisorError("Snapshot:delete %s" % task.get_error_message())
コード例 #20
0
ファイル: vmware.py プロジェクト: pombredanne/simplestack
def delete_vm(server, vm_obj):
    """Destroy *vm_obj* and wait for completion; raise HypervisorError on failure."""
    # Invoke Destroy_Task.
    req = VI.Destroy_TaskRequestMsg()
    ref = req.new__this(vm_obj._mor)
    ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    req.set_element__this(ref)

    # Submit and wait for the task to finish.
    task = VITask(server._proxy.Destroy_Task(req)._returnval, server)
    if task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) != task.STATE_SUCCESS:
        raise HypervisorError("Guest:delete %s" % task.get_error_message())
コード例 #21
0
ファイル: esxi.py プロジェクト: matelakat/ditty-box
    def delete_vm(self, vm):
        """Destroy vm.esxi_vm on the ESXi server; raise Exception on task error."""
        # Invoke Destroy_Task.
        req = VI.Destroy_TaskRequestMsg()

        mor_ref = req.new__this(vm.esxi_vm._mor)
        mor_ref.set_attribute_type(vm.esxi_vm._mor.get_attribute_type())
        req.set_element__this(mor_ref)
        task = VITask(self.esxi_server._proxy.Destroy_Task(req)._returnval,
                      self.esxi_server)

        # Block until the task settles, then inspect the terminal state.
        task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if task.get_state() == task.STATE_ERROR:
            raise Exception("Error removing vm:", task.get_error_message())
コード例 #22
0
ファイル: vmware.py プロジェクト: LuizOz/simplestack
def delete_vm(server, vm_obj):
    """Delete a guest VM via Destroy_Task, raising HypervisorError if it fails."""
    destroy_req = VI.Destroy_TaskRequestMsg()
    target = destroy_req.new__this(vm_obj._mor)
    target.set_attribute_type(vm_obj._mor.get_attribute_type())
    destroy_req.set_element__this(target)
    task_mor = server._proxy.Destroy_Task(destroy_req)._returnval

    # Wait for the destroy task to reach a terminal state.
    task = VITask(task_mor, server)
    result = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if result != task.STATE_SUCCESS:
        raise HypervisorError("Guest:delete %s" % task.get_error_message())
コード例 #23
0
    def __read_tasks(self, max_count, next_page):
        """Fetch up to *max_count* tasks from the task history collector.

        next_page: True reads forward (ReadNextTasks), False reads backward
        (ReadPreviousTasks). Returns a list of VITask wrappers.
        Raises VIException for a non-int max_count and VIApiException on a
        SOAP fault.
        """
        if not isinstance(max_count, int):
            raise VIException("max_count should be an integer",
                              FaultTypes.PARAMETER_ERROR)

        request = (VI.ReadNextTasksRequestMsg() if next_page
                   else VI.ReadPreviousTasksRequestMsg())

        _this = request.new__this(self._mor)
        _this.set_attribute_type(self._mor.get_attribute_type())
        request.set_element__this(_this)
        request.set_element_maxCount(max_count)

        try:
            if next_page:
                resp = self._server._proxy.ReadNextTasks(request)._returnval
            else:
                resp = self._server._proxy.ReadPreviousTasks(
                    request)._returnval
            ret = [VITask(item.Task, self._server) for item in resp]
        except (VI.ZSI.FaultException) as e:
            raise VIApiException(e)

        return ret
コード例 #24
0
    def change_vdisk_size(self, vminstance, vdisk_name, vdisk_size):
        '''修改虚拟磁盘容量大小'''
        vdisk_name_str = vdisk_name.encode(
            'utf-8')  # 对磁盘名称进行解码(网络传输过来的值是unicode编码标准),否则将设置失败
        vm_obj = vminstance
        size_kb = int(vdisk_size) * 1024 * 1024  #GB转换为KB
        sizes = {}
        sizes[vdisk_name_str] = size_kb
        print sizes
        hd_sizes_kb = sizes
        hd_to_modify = []
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type == "VirtualDisk" and dev.deviceInfo.label in hd_sizes_kb:
                dev_obj = dev._obj
                dev_obj.set_element_capacityInKB(
                    hd_sizes_kb[dev.deviceInfo.label])
                hd_to_modify.append(dev_obj)

        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm_obj._mor)
        _this.set_attribute_type(vm_obj._mor.get_attribute_type())
        request.set_element__this(_this)
        spec = request.new_spec()

        # Change the HDs sizes
        dev_changes = []
        for hd in hd_to_modify:
            dev_change = spec.new_deviceChange()
            dev_change.set_element_operation("edit")
            dev_change.set_element_device(hd)
            dev_changes.append(dev_change)
        if dev_changes:
            spec.set_element_deviceChange(dev_changes)

        request.set_element_spec(spec)
        ret = self.s._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, self.s)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            print "VM successfully reconfigured"
            return True
        elif status == task.STATE_ERROR:
            print "Error reconfiguring vm: %s" % task.get_error_message()
            return False
        self.s.disconnect()
コード例 #25
0
def remove_nic_vm(vCenterserver, username, password, datacentername, vm_name,
                  networklabel):
    '''Remove the NIC labelled *networklabel* from a VM.

    Returns a human-readable status string; raises Exception if the VM or a
    matching NIC cannot be found.

    Fix: the error branch called str(vm_obj, task.get_error_message()) —
    a two-argument str() that raises TypeError — it now formats only the
    task error message.
    '''
    con = vs_connect(vCenterserver, username, password)
    vm_obj = con.get_vm_by_name(vm_name, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM %s not found" % vm_name)

    # Find the NIC device matching the requested label.
    net_device = None
    for dev in vm_obj.properties.config.hardware.device:
        if (dev._type in [
                "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"
        ] and hasattr(dev, "backing")
                and dev.deviceInfo.label == networklabel):
            net_device = dev._obj
            break

    if not net_device:
        raise Exception("The vm_name seems to lack a Virtual Nic")

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_operation("remove")
    dev_change.set_element_device(net_device)
    # Submit the device change
    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = con._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, con)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        return "failure reconfiguring vm_name: " + str(
            task.get_error_message())
    else:
        return " failure VM not found"
コード例 #26
0
def destroy_object(mor):
    """Launch Destroy_Task on managed object *mor* (global `server`); return its VITask."""
    req = VI.Destroy_TaskRequestMsg()
    this_ref = req.new__this(mor)
    this_ref.set_attribute_type(mor.get_attribute_type())
    req.set_element__this(this_ref)

    return VITask(server._proxy.Destroy_Task(req)._returnval, server)
コード例 #27
0
def set_vm_datastore(host_ip, host_name, host_password, vm_name, reservation):
    """Grow the last virtual disk of *vm_name* until the VM's total disk
    capacity reaches *reservation* GB.

    Connects to the host with the given credentials, submits a
    ReconfigVM_Task and returns True on success, False otherwise.
    Shrinking is never attempted: when the VM already has at least the
    requested capacity, an empty reconfigure spec is submitted.
    """
    server = VIServer()
    server.connect(host_ip, host_name, host_password)
    vm_mor = server.get_vm_by_name(vm_name)

    request = VI.ReconfigVM_TaskRequestMsg()
    managed_ref = request.new__this(vm_mor._mor)
    managed_ref.set_attribute_type(vm_mor._mor.get_attribute_type())
    request.set_element__this(managed_ref)
    spec = request.new_spec()

    current_kb = get_disk_size(vm_mor)
    wanted_kb = reservation * 1024 * 1024  # GB -> KB

    changes = []
    if wanted_kb > current_kb:
        # Grow the last disk by exactly the missing amount.
        last_disk = get_disks(vm_mor)[-1]
        target_kb = wanted_kb - current_kb + last_disk.capacityInKB

        change = spec.new_deviceChange()
        change.set_element_operation('edit')
        last_disk._obj.set_element_capacityInKB(target_kb)
        change.set_element_device(last_disk._obj)
        changes.append(change)

    if changes:
        spec.set_element_deviceChange(changes)

    request.set_element_spec(spec)
    task = VITask(server._proxy.ReconfigVM_Task(request)._returnval, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    return status == task.STATE_SUCCESS
コード例 #28
0
    def delete_vdisk(self, vminstance, unit_number):
        '''Delete a virtual hard disk from a virtual machine.

        :parameter
            @vminstance: pysphere virtual-machine instance
            @unit_number: unit number of the virtual disk to remove
        :returns
            True when the disk was removed.
        '''
        # NOTE(review): on the error path sys.exit(1) terminates the
        # process, so the "return False" below it is unreachable and
        # callers never actually receive False.
        UNIT_NUMBER = unit_number  # Virtual disk unit number
        vm = vminstance
        # find the device to be removed
        dev = [
            dev for dev in vm.properties.config.hardware.device
            if dev._type == "VirtualDisk" and dev.unitNumber == UNIT_NUMBER
        ]
        if not dev:
            raise Exception("NO DEVICE FOUND")
        dev = dev[0]._obj
        # Build a ReconfigVM_Task request carrying one "remove" change.
        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)

        spec = request.new_spec()
        dc = spec.new_deviceChange()
        dc.Operation = "remove"
        dc.Device = dev

        spec.DeviceChange = [dc]
        request.Spec = spec

        task = self.s._proxy.ReconfigVM_Task(request)._returnval
        vi_task = VITask(task, self.s)

        # Block until the task reaches a terminal state.
        status = vi_task.wait_for_state(
            [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            print "Error removing hdd from vm:", vi_task.get_error_message()
            sys.exit(1)
            return False
        else:
            print "Hard drive successfully removed"
            return True
コード例 #29
0
ファイル: mgmt_system.py プロジェクト: jkrocil/cfme_tests
    def delete_vm(self, vm_name):
        """Destroy the vSphere VM *vm_name*, powering it off first when
        necessary. Returns True when the Destroy task succeeds."""
        self.wait_vm_steady(vm_name)
        logger.info(" Deleting vSphere VM %s" % vm_name)
        vm = self._get_vm(vm_name)

        if vm.is_powered_on():
            self.stop_vm(vm_name)

        # When pysphere moves up to 0.1.8, we can just do:
        # vm.destroy()
        destroy_req = VI.Destroy_TaskRequestMsg()
        target = destroy_req.new__this(vm._mor)
        target.set_attribute_type(vm._mor.get_attribute_type())
        destroy_req.set_element__this(target)

        task = VITask(self.api._proxy.Destroy_Task(destroy_req)._returnval,
                      self.api)
        final = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        return final == task.STATE_SUCCESS
コード例 #30
0
def add_new_nic(server, datacentername, vm, network_name):
    '''
    Add a new NIC to a VM.

    server: VIServer object
    datacentername: datacenter name
    vm: VIVirtualMachine name
    network_name: name of the network to back the new NIC

    Returns a status string describing the task outcome; raises
    Exception when the VM cannot be found.
    '''
    # BUG FIX: removed the unused local "net_device = None" the
    # original declared but never read.
    vm_obj = server.get_vm_by_name(vm, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM not found")

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    # Add a PCNet32 NIC backed by the named network; key 4 is only a
    # placeholder -- vSphere assigns the real device key.
    dev_change = spec.new_deviceChange()
    dev_change.set_element_operation("add")
    nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
    nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
        "nic_backing").pyclass()
    nic_backing.set_element_deviceName(network_name)
    nic_ctlr.set_element_addressType("generated")
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)
    dev_change.set_element_device(nic_ctlr)

    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    #Wait for the task to finish
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        return "failure reconfiguring vm: " + str(task.get_error_message())
コード例 #31
0
ファイル: boot_order.py プロジェクト: thenavs/lolfuzz
 def reconfigure(self, spec_content, sync=True):
     try:
         request = VI.ReconfigVM_TaskRequestMsg()
         _this = request.new__this(self.vm._mor)
         _this.set_attribute_type(self.vm._mor.get_attribute_type())
         request.set_element__this(_this)
         spec = request.new_spec()
         spec.set_element_bootOptions(spec_content)
         request.set_element_spec(spec)
         task = self.vm._server._proxy.ReconfigVM_Task(request)._returnval
         vi_task = VITask(task, self.vm._server)
         if sync:
             status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                              vi_task.STATE_ERROR])
             if status == vi_task.STATE_ERROR:
                 raise VIException(vi_task.get_error_message(),
                                   FaultTypes.TASK_ERROR)
             return
         return vi_task
     except (VI.ZSI.FaultException), e:
         raise VIApiException(e)
コード例 #32
0
ファイル: boot_order.py プロジェクト: thenavs/lolfuzz
 def reconfigure(self, spec_content, sync=True):
     # Reconfigure the VM's boot options through ReconfigVM_Task.
     # spec_content: value assigned to the spec's bootOptions element.
     # sync: when True, wait for the task and raise VIException
     #       (FaultTypes.TASK_ERROR) on failure; when False, return the
     #       pending VITask immediately.
     # SOAP faults are wrapped in VIApiException.
     try:
         request = VI.ReconfigVM_TaskRequestMsg()
         _this = request.new__this(self.vm._mor)
         _this.set_attribute_type(self.vm._mor.get_attribute_type())
         request.set_element__this(_this)
         spec = request.new_spec()
         spec.set_element_bootOptions(spec_content)
         request.set_element_spec(spec)
         task = self.vm._server._proxy.ReconfigVM_Task(request)._returnval
         vi_task = VITask(task, self.vm._server)
         if sync:
             status = vi_task.wait_for_state(
                 [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
             if status == vi_task.STATE_ERROR:
                 raise VIException(vi_task.get_error_message(),
                                   FaultTypes.TASK_ERROR)
             return
         return vi_task
     except (VI.ZSI.FaultException), e:
         raise VIApiException(e)
コード例 #33
0
ファイル: vmware.py プロジェクト: pombredanne/simplestack
def create_snapshot(server, vm_obj, snapshot_name):
    """Create a snapshot named *snapshot_name* on *vm_obj*.

    A freshly generated UUID is stored in the snapshot description so
    the snapshot can be located afterwards; memory capture and guest
    quiescing are both disabled. Raises HypervisorError when the task
    fails; returns the snapshot found via get_snapshot().
    """
    snapshot_id = str(uuid.uuid4())

    request = VI.CreateSnapshot_TaskRequestMsg()
    vm_ref = request.new__this(vm_obj._mor)
    vm_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(vm_ref)
    request.set_element_name(snapshot_name)
    request.set_element_description(snapshot_id)
    request.set_element_memory(False)
    request.set_element_quiesce(False)

    task = VITask(server._proxy.CreateSnapshot_Task(request)._returnval,
                  server)
    outcome = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if outcome != task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:create %s" % task.get_error_message())

    vm_obj.refresh_snapshot_list()
    return get_snapshot(vm_obj, snapshot_id)
コード例 #34
0
ファイル: vmware.py プロジェクト: LuizOz/simplestack
def create_snapshot(server, vm_obj, snapshot_name):
    # Create a snapshot of vm_obj named snapshot_name. A random UUID is
    # written into the snapshot description so get_snapshot() can find
    # it again; memory capture and quiescing are disabled.
    # Raises HypervisorError when the CreateSnapshot task fails.
    snapshot_id = str(uuid.uuid4())
    request = VI.CreateSnapshot_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)

    request.set_element_name(snapshot_name)
    request.set_element_description(snapshot_id)
    request.set_element_memory(False)
    request.set_element_quiesce(False)

    ret = server._proxy.CreateSnapshot_Task(request)._returnval
    task = VITask(ret, server)

    # Block until the snapshot task reaches a terminal state.
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

    if status != task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:create %s" % task.get_error_message())

    vm_obj.refresh_snapshot_list()
    return get_snapshot(vm_obj, snapshot_id)
コード例 #35
0
def create_nas_store(host, access_mode, local_path, remote_host, remote_path,
                     username=None, password=None, volume_type='NFS'):

    #access_mode: 'readOnly' or 'readWrite'
    #volume_type: 'CIFS' or 'NFS' (if not set defaults to NFS)
    
    host_properties = VIProperty(server, host)
    
    hds = host_properties.configManager.datastoreSystem._obj

    request = VI.CreateNasDatastoreRequestMsg()
    _this = request.new__this(hds)
    _this.set_attribute_type(hds.get_attribute_type())
    request.set_element__this(_this)
    
    spec = request.new_spec()
    spec.set_element_accessMode(access_mode)
    spec.set_element_localPath(local_path)
    spec.set_element_remoteHost(remote_host)
    spec.set_element_remotePath(remote_path)
    if username:
        spec.set_element_userName(username)
    if password:
        spec.set_element_password(password)
    if volume_type:
        spec.set_element_type(volume_type)
    
    request.set_element_spec(spec)

    ret = server._proxy.CreateNasDatastore(request)._returnval

    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
      print "Successfully mounted NFS share"
    elif status == task.STATE_ERROR:
      print "Error mounting NFS share: %s" % task.get_error_message()
コード例 #36
0
def change_memory_size(vminstance, memory_gb):
    '''Modify the memory size of the virtual machine'''
    memory_mb = memory_gb * 1024  #GB conversion MB
    vm_obj = vminstance
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    # Memory size
    spec.set_element_memoryMB(memory_mb)

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully reconfigured"
        return True
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm: %s" % task.get_error_message()
        return False
    server.disconnect()
コード例 #37
0
    def get_latest_tasks(self):
        """
        Return the task items in the collector's 'viewable latest page'
        as VITask objects. New tasks matching the collector's filter are
        appended to the page while the oldest (by creation time) drop
        off to bound its size; the returned tasks are unordered.
        """
        self._props._flush_cache()
        if not hasattr(self._props, "latestPage"):
            return []
        return [VITask(entry.task._obj, self._server)
                for entry in self._props.latestPage]
コード例 #38
0
def delete_vm_by_name(name, server, remove_files=True):
    """Delete VM

    Unregisters a VM and remove it files from the datastore by name.
    @name is the VM name.
    @remove_files - if True (default) will delete VM files from datastore.
    """
    # Import
    from pysphere import VITask
    from pysphere.resources import VimService_services as VI

    try:
        #Get VM
        vm = server.get_vm_by_name(name)

        if remove_files:
            #Invoke Destroy_Task
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.Destroy_Task(request)._returnval
            task = VITask(ret, server)

            #Wait for the task to finish
            status = task.wait_for_state([task.STATE_SUCCESS,
                                          task.STATE_ERROR])
            if status == task.STATE_SUCCESS:
                print "VM successfully unregistered and deleted from datastore"
            elif status == task.STATE_ERROR:
                print "Error removing vm:", task.get_error_message()
        elif not remove_files:
            #Invoke UnregisterVMRequestMsg
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

            print "Done."

    except (VI.ZSI.FaultException), e:
        raise VIApiException(e)
コード例 #39
0
ファイル: module.py プロジェクト: thydel/ar-pysphere-misc
    datastore_name = m.group(1)
    backing.FileName = "[%s]" % datastore_name
    backing.DiskMode = "persistent"
    backing.Split = False
    backing.WriteThrough = False
    backing.ThinProvisioned = False
    backing.EagerlyScrub = False
    hd.Backing = backing

    disk_spec.Device = hd

    spec.DeviceChange = [disk_spec]
    request.Spec = spec

    task = server._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, server)

    # Wait for task to finish
    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

    if status == vi_task.STATE_ERROR:
        msg = "ERROR CONFIGURING VM: " + vi_task.get_error_message()
        module.fail_json(msg=msg)

    module.exit_json(changed=True, ansible_facts=facts)


# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
コード例 #40
0
memory_mb = 512

vm_obj = server.get_vm_by_name("DEV_VM.clone")

request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm_obj._mor)
_this.set_attribute_type(vm_obj._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()

#set the new RAM size
spec.set_element_memoryMB(memory_mb)

#set the no of CPU and socket
spec.set_element_numCPUs(2)
#spec.set_element_numCoresPerSocket(2)

request.set_element_spec(spec)
ret = server._proxy.ReconfigVM_Task(request)._returnval

#Wait for the task to finish
task = VITask(ret, server)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
    print "VM successfully reconfigured"
elif status == task.STATE_ERROR:
    print "Error reconfiguring vm: %s" % task.get_error_message()

server.disconnect()
コード例 #41
0
    pool.set_attribute_type(rp_mor.get_attribute_type())
    location.set_element_pool(pool)

# NOTE(review): fragment of a CloneVM example -- `location`, `spec`,
# `request`, `server` and `host_mor` are defined on earlier, unseen
# lines of the original script.
if host_mor:
    hs = location.new_host(host_mor)
    hs.set_attribute_type(host_mor.get_attribute_type())
    location.set_element_host(hs)

spec.set_element_location(location)

# powerOn
spec.set_element_powerOn(False)

# template
spec.set_element_template(False)

# Set the 'spec' element of the request
request.set_element_spec(spec)

# Execute the request
task = server._proxy.CloneVM_Task(request)._returnval

vi_task = VITask(task, server)

# Block until the clone task reaches a terminal state.
status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

if status == vi_task.STATE_ERROR:
    print('Failed')

    print(vi_task.get_error_message())
コード例 #42
0
# NOTE(review): fragment of an add-disk example -- `hd`, `dc`, `spec`,
# `request`, the connection `s` and the UNIT_NUMBER / DISK_SIZE_IN_MB /
# DATASTORE_NAME constants come from earlier, unseen lines.
hd.UnitNumber = UNIT_NUMBER
hd.CapacityInKB = DISK_SIZE_IN_MB * 1024
# 1000 matches the controller key used by the other disk examples in
# this file -- presumably the first SCSI controller; confirm on target.
hd.ControllerKey = 1000

# Flat VMDK backing on the target datastore, thick-provisioned.
backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass()
backing.FileName = "[%s]" % DATASTORE_NAME
backing.DiskMode = "persistent"
backing.Split = False
backing.WriteThrough = False
backing.ThinProvisioned = False
backing.EagerlyScrub = False
hd.Backing = backing

dc.Device = hd

spec.DeviceChange = [dc]
request.Spec = spec

task = s._proxy.ReconfigVM_Task(request)._returnval
vi_task = VITask(task, s)

# Wait for task to finish
status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

if status == vi_task.STATE_ERROR:
    print "ERROR CONFIGURING VM:", vi_task.get_error_message()
else:
    print "VM CONFIGURED SUCCESSFULLY"

s.disconnect()
コード例 #43
0
ファイル: egniter.py プロジェクト: bartekrutkowski/egniter
def esx_vm_configure(config_json):
    """Apply a full configuration to an ESX VM from *config_json*.

    Performs several independent ReconfigVM_Task rounds, reconnecting
    to ESX between each: vApp properties, memory size, CPU count, any
    extra disks in 'hw_disk_gb', and any network adapters in
    'hw_vmnet'. Prints the outcome of each round.
    """
    config = config_create(config_json)
    properties = []

    esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
    vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)

    spec = request.new_spec()
    vappconfig = spec.new_vAppConfig()

    # Translate the config dict (operation -> list of property dicts)
    # into vApp property specs via the generated set_element_* setters.
    for operation, items in config.items():
        for item in items:
            prop = vappconfig.new_property()
            prop.set_element_operation(operation)
            info = prop.new_info()
            for k, v in item.items():
                method = getattr(info, "set_element_" + k)
                method(v)
            prop.set_element_info(info)
            properties.append(prop)

    vappconfig.set_element_property(properties)
    spec.set_element_vAppConfig(vappconfig)

    request.set_element_spec(spec)
    task = esx._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, esx)

    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    esx.disconnect()

    esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
    vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

    # NOTE(review): this memory round reuses the `request` object built
    # for the previous connection -- its __this reference still points
    # at the old `vm` lookup and no new ReconfigVM_TaskRequestMsg is
    # created (unlike the CPU round below). Confirm this is intended.
    spec = request.new_spec()
    spec.set_element_memoryMB(config_json['hw_mem_mb'])

    request.set_element_spec(spec)
    task = esx._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, esx)

    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    esx.disconnect()

    esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
    vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)

    spec = request.new_spec()
    spec.set_element_numCoresPerSocket(config_json['hw_vcpu'])
    spec.set_element_numCPUs(config_json['hw_vcpu'])

    request.set_element_spec(spec)
    task = esx._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, esx)

    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    if status == vi_task.STATE_ERROR:
        print ('ERROR: %s' % vi_task.get_error_message())
    else:
        print ('vApp config successful.')
    esx.disconnect()

    # iterate over disk dictionary and add any disks found
    # to the vm configuration - the dict disk number starts with 1, not 0
    # as the disk with number 0 is already inherited from the template
    if 'hw_disk_gb' in config_json:
        for disk in config_json['hw_disk_gb']:
            esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
            vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

            request = VI.ReconfigVM_TaskRequestMsg()
            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)

            spec = request.new_spec()

            dc = spec.new_deviceChange()
            dc.Operation = "add"
            dc.FileOperation = "create"

            # Key -100 is a temporary placeholder; ESX assigns the
            # real device key on creation.
            hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
            hd.Key = -100
            hd.UnitNumber = int(disk)
            hd.CapacityInKB = config_json['hw_disk_gb'][disk] * 1024 * 1024
            hd.ControllerKey = 1000

            backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
                "backing").pyclass()
            backing.FileName = "%s" % vm.get_property('path').split()[0]
            backing.DiskMode = "persistent"
            backing.Split = False
            backing.WriteThrough = False
            backing.ThinProvisioned = False
            backing.EagerlyScrub = False
            hd.Backing = backing

            dc.Device = hd

            spec.DeviceChange = [dc]
            request.Spec = spec

            request.set_element_spec(spec)
            task = esx._proxy.ReconfigVM_Task(request)._returnval
            vi_task = VITask(task, esx)

            # Wait for task to finish
            status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                             vi_task.STATE_ERROR])
            if status == vi_task.STATE_ERROR:
                print ('ERROR: %s' % vi_task.get_error_message())
            else:
                print ('Disk config successful.')
            esx.disconnect()

    # iterate over network adapter dictionary and add any adapters found
    # to the vm configuration
    for adapter in config_json['hw_vmnet']['adapter']:
        esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
        vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)

        spec = request.new_spec()
        dev_change = spec.new_deviceChange()
        dev_change.set_element_operation('add')
        nic_ctlr = VI.ns0.VirtualVmxnet3_Def('nic_ctlr').pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
            'nic_backing').pyclass()
        nic_backing.set_element_deviceName(
            config_json['hw_vmnet']['adapter'][adapter]['label'])
        nic_ctlr.set_element_addressType('generated')
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        dev_change.set_element_device(nic_ctlr)

        spec.set_element_deviceChange([dev_change])
        request.set_element_spec(spec)
        ret = esx._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        vi_task = VITask(ret, esx)

        status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                         vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            print ('ERROR: %s' % vi_task.get_error_message())
        else:
            print ('Network adapter config successful.')
        esx.disconnect()
コード例 #44
0
ファイル: esxi.py プロジェクト: matelakat/ditty-box
    def _create_vm(self, volume_name, vm_name, vm_description, mem_megs,
        cpu_count, guest_os_id, disk_size, network_name, vm_folder, resource_pool, host):
        """Create a new VM via CreateVM_Task with one LSI Logic SCSI
        controller, one thin-provisioned flat disk of *disk_size* KB
        backed by *volume_name*, and one E1000 NIC on *network_name*.

        vm_folder / resource_pool / host are managed object references;
        raises Exception when the creation task fails.
        """
        create_vm_request = VI.CreateVM_TaskRequestMsg()
        config = create_vm_request.new_config()
        vmfiles = config.new_files()
        vmfiles.set_element_vmPathName(volume_name)
        config.set_element_files(vmfiles)
        config.set_element_name(vm_name)
        config.set_element_annotation(vm_description)
        config.set_element_memoryMB(mem_megs)
        config.set_element_numCPUs(cpu_count)
        config.set_element_guestId(guest_os_id)
        devices = []

        #add a scsi controller
        # The controller key is referenced below by the disk's
        # controllerKey so the disk attaches to this controller.
        disk_ctrl_key = 1
        scsi_ctrl_spec =config.new_deviceChange()
        scsi_ctrl_spec.set_element_operation('add')
        scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
        scsi_ctrl.set_element_busNumber(0)
        scsi_ctrl.set_element_key(disk_ctrl_key)
        scsi_ctrl.set_element_sharedBus("noSharing")

        scsi_ctrl_spec.set_element_device(scsi_ctrl)
        devices.append(scsi_ctrl_spec)

        # create a new disk - file based - for the vm
        disk_spec = config.new_deviceChange()
        disk_spec.set_element_fileOperation("create")
        disk_spec.set_element_operation("add")
        disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
        disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
        disk_backing.set_element_fileName(volume_name)
        disk_backing.set_element_diskMode("persistent")
        disk_backing.set_element_thinProvisioned(True)
        disk_ctlr.set_element_key(0)
        disk_ctlr.set_element_controllerKey(disk_ctrl_key)
        disk_ctlr.set_element_unitNumber(0)
        disk_ctlr.set_element_backing(disk_backing)
        disk_ctlr.set_element_capacityInKB(disk_size)
        disk_spec.set_element_device(disk_ctlr)
        devices.append(disk_spec)

        #add a NIC. the network Name must be set as the device name to create the NIC.
        nic_spec = config.new_deviceChange()
        nic_spec.set_element_operation("add")
        nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
        nic_backing.set_element_deviceName(network_name)
        nic_ctlr.set_element_addressType("generated")
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(1)
        nic_spec.set_element_device(nic_ctlr)
        devices.append(nic_spec)

        # Attach the assembled device list and placement references
        # (folder / resource pool / host) to the request.
        config.set_element_deviceChange(devices)
        create_vm_request.set_element_config(config)
        folder_mor = create_vm_request.new__this(vm_folder)
        folder_mor.set_attribute_type(vm_folder.get_attribute_type())
        create_vm_request.set_element__this(folder_mor)
        rp_mor = create_vm_request.new_pool(resource_pool)
        rp_mor.set_attribute_type(resource_pool.get_attribute_type())
        create_vm_request.set_element_pool(rp_mor)
        host_mor = create_vm_request.new_host(host)
        host_mor.set_attribute_type(host.get_attribute_type())
        create_vm_request.set_element_host(host_mor)

        #CREATE THE VM
        taskmor = self.esxi_server._proxy.CreateVM_Task(create_vm_request)._returnval
        task = VITask(taskmor, self.esxi_server)
        task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

        if task.get_state() == task.STATE_ERROR:
            raise Exception("Error creating vm: %s" % task.get_error_message())
コード例 #45
0
def requestNewSpec(vmname):
    statusdict = {}

    try:
        vm_obj = server.get_vm_by_name(vmname)
    except VIException as inst:
        statusdict["error"] = str(inst)
        print json.dumps(statusdict)
        sys.exit()

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    if args.memory:
        memory_mb = int(args.memory[0])
        #set the new RAM size
        spec.set_element_memoryMB(memory_mb)

    if args.disk:
        hd_name = str(args.disk[1])
        hd_sizes_kb = {hd_name: int(args.disk[0]) * 1024 * 1024}  #GB
        hd_to_modify = []
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type == "VirtualDisk" and dev.deviceInfo.label in hd_sizes_kb:
                dev_obj = dev._obj
                dev_obj.set_element_capacityInKB(
                    hd_sizes_kb[dev.deviceInfo.label])
                hd_to_modify.append(dev_obj)
        #Change the HDs sizes
        dev_changes = []
        for hd in hd_to_modify:
            dev_change = spec.new_deviceChange()
            dev_change.set_element_operation("edit")
            dev_change.set_element_device(hd)
            dev_changes.append(dev_change)
        if dev_changes:
            spec.set_element_deviceChange(dev_changes)

    if args.cpu:
        numcpu = int(args.cpu[0])
        numcorespersocket = int(args.cpu[1])
        temp = float(numcpu) / float(numcorespersocket)
        temp = (temp % 1)
        if temp == float(0):
            if (numcpu / numcorespersocket) > 8:
                statusdict["cpu"] = "Only 8 socket for a VM is supported"
                print json.dumps(statusdict)
                sys.exit()
            else:
                spec.set_element_numCPUs(numcpu)
                spec.set_element_numCoresPerSocket(numcorespersocket)
        else:
            statusdict[
                "cpu"] = "No of CPU divided by No of Cores per Socket should always be a Integer"
            print json.dumps(statusdict)
            sys.exit()

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    #Wait for the task to finish
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        #print "VM successfully reconfigured"
        statusdict[vmname] = "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        #print "Error reconfiguring vm: %s" % task.get_error_message()
        errormsg = str(task.get_error_message())
        statusdict["error"] = errormsg

    print json.dumps(statusdict)
コード例 #46
0
ファイル: select_cdrom_device.py プロジェクト: morphizer/misc
    if dev._type == "VirtualCdrom" and not dev.connectable.startConnected:
      d = dev._obj
      d.Connectable.set_element_startConnected(True)
      request = VI.ReconfigVM_TaskRequestMsg()
      _this = request.new__this(vm._mor)
      _this.set_attribute_type(vm._mor.get_attribute_type())
      request.set_element__this(_this)
      spec = request.new_spec()
      dev_change = spec.new_deviceChange()
      dev_change.set_element_device(d)
      dev_change.set_element_operation("edit")
      spec.set_element_deviceChange([dev_change])
      request.set_element_spec(spec)
      ret = server._proxy.ReconfigVM_Task(request)._returnval

      task = VITask(ret, server)
      status = task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])
      if status == task.STATE_SUCCESS:
        print "%s: successfully reconfigured" % vm.properties.name
      elif status == task.STATE_ERROR:
        print "%s: Error reconfiguring vm" % vm.properties.name
        print "%s" % task.get_error_message()
elif options.device == "host":
  #HOST DEVICE: hardcoded to use the first valid host device
  valid_host_device = get_valid_host_devices(vm)[0]
  change_cdrom_type(cdrom, "HOST DEVICE", valid_host_device)
elif options.device == "client":
  #CLIENT DEVICE
  change_cdrom_type(cdrom, "CLIENT DEVICE")

apply_changes(vm, cdrom)
コード例 #47
0
ファイル: vmware.py プロジェクト: LuizOz/simplestack
def update_vm(server, vm_obj, guestdata):
    """Reconfigure an existing VM to match the settings in guestdata.

    Builds a single ReconfigVM_Task request covering every requested
    change (name, memory, CPU count, disk growth, ISO attachment and
    VMI paravirtualization driver) and blocks until the task finishes.

    :param server: pysphere server connection (its ``_proxy`` issues
        the ReconfigVM_Task call).
    :param vm_obj: pysphere VM object to reconfigure.
    :param guestdata: dict of optional keys -- "tags" (list of
        annotation lines), "cpus", "hdd" (total disk size, in GB --
        TODO confirm against callers), "iso" (dict with "name"),
        "memory" (MB), "name", "paravirtualized" (bool or None).
        Missing keys leave the corresponding setting untouched.
    :raises HypervisorError: if the reconfigure task does not succeed.
    """
    new_annotation = guestdata.get("tags")
    new_cpus = guestdata.get("cpus")
    new_hdd = guestdata.get("hdd")
    new_iso = guestdata.get("iso")
    new_memory = guestdata.get("memory")
    new_name = guestdata.get("name")
    # Renamed from "enable_vmi": the old local shadowed the
    # module-level enable_vmi() helper in this file.
    vmi_requested = guestdata.get("paravirtualized")

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    # Only include fields that actually change, keeping the
    # reconfigure request minimal.
    if new_name and vm_obj.properties.config.name != new_name:
        spec.set_element_name(new_name)

    if new_memory and vm_obj.properties.config.hardware.memoryMB != new_memory:
        # set the new RAM size
        spec.set_element_memoryMB(new_memory)

    if new_cpus and vm_obj.properties.config.hardware.numCPU != new_cpus:
        # set the new CPU count
        spec.set_element_numCPUs(new_cpus)

    device_config_specs = []
    if new_hdd:
        # new_hdd is in GB; capacity is tracked in KB. Compute the
        # target once instead of twice as before.
        new_hdd_kb = new_hdd * 1024 * 1024
        disk_size = get_disk_size(vm_obj)
        if new_hdd_kb > disk_size:
            # Grow the last disk by the missing amount; shrinking is
            # not supported (hence the strict > check above).
            disk = get_disks(vm_obj)[-1]
            new_disk_size = new_hdd_kb - disk_size + disk.capacityInKB

            device_config_spec = spec.new_deviceChange()
            device_config_spec.set_element_operation('edit')
            disk._obj.set_element_capacityInKB(new_disk_size)
            device_config_spec.set_element_device(disk._obj)
            device_config_specs.append(device_config_spec)

    if new_iso:
        media_device = get_cd(vm_obj)
        connectable = media_device._obj.new_connectable()
        connectable.set_element_allowGuestControl(False)

        if new_iso.get("name") and new_iso["name"] != "":
            # Attach the named ISO and connect it now and on boot.
            connectable.set_element_connected(True)
            connectable.set_element_startConnected(True)
            media_device._obj.set_element_connectable(connectable)

            backing = VCdIsoBackingInfo()
            backing.set_element_fileName(new_iso["name"])
            media_device._obj.set_element_backing(backing)
        else:
            # Empty/absent name means eject: disconnect the CD device.
            connectable.set_element_connected(False)
            connectable.set_element_startConnected(False)
            media_device._obj.set_element_connectable(connectable)

        device_config_spec = spec.new_deviceChange()
        device_config_spec.set_element_operation('edit')
        device_config_spec.set_element_device(media_device._obj)
        device_config_specs.append(device_config_spec)

    if vmi_requested is not None:
        vmi_driver = get_vmi_driver(vm_obj)
        if vmi_requested and not vmi_driver:
            # Add the VMI paravirtualization ROM device; key 11000 is
            # the conventional key for this device (see enable_vmi()).
            vmi_driver = VirtualMachineVMIROM()
            vmi_driver.set_element_key(11000)

            device_config_spec = spec.new_deviceChange()
            device_config_spec.set_element_operation('add')
            device_config_spec.set_element_device(vmi_driver)
            device_config_specs.append(device_config_spec)
        elif not vmi_requested and vmi_driver:
            device_config_spec = spec.new_deviceChange()
            device_config_spec.set_element_operation('remove')
            device_config_spec.set_element_device(vmi_driver._obj)
            device_config_specs.append(device_config_spec)

    if device_config_specs:
        spec.set_element_deviceChange(device_config_specs)

    if new_annotation:
        spec.set_element_annotation("\n".join(new_annotation))

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status != task.STATE_SUCCESS:
        raise HypervisorError("Guest:update %s" % task.get_error_message())