Exemplo n.º 1
0
def enable_vmi(server, vm_obj):
    """Attach a VMI ROM device to *vm_obj* via a ReconfigVM task.

    server: pysphere VIServer connection.
    vm_obj: VIVirtualMachine whose hardware is extended.
    Raises HypervisorError if the reconfigure task does not succeed.
    """
    # Build the ReconfigVM_Task request targeting the VM's managed object.
    reconfig_req = VI.ReconfigVM_TaskRequestMsg()
    vm_ref = reconfig_req.new__this(vm_obj._mor)
    vm_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    reconfig_req.set_element__this(vm_ref)

    config_spec = reconfig_req.new_spec()

    # Describe the new device: a VMI ROM added under key 11000.
    vmi_rom = VirtualMachineVMIROM()
    vmi_rom.set_element_key(11000)
    change = config_spec.new_deviceChange()
    change.set_element_operation('add')
    change.set_element_device(vmi_rom)
    config_spec.set_element_deviceChange([change])

    reconfig_req.set_element_spec(config_spec)
    task_ref = server._proxy.ReconfigVM_Task(reconfig_req)._returnval

    # Block until vSphere reports a terminal state for the task.
    vi_task = VITask(task_ref, server)
    final_state = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                          vi_task.STATE_ERROR])
    if final_state != vi_task.STATE_SUCCESS:
        raise HypervisorError("Guest:update %s" % vi_task.get_error_message())
Exemplo n.º 2
0
def change_cpu(vminstance=None, cpu_number=None, cpu_core=None):
    '''Modify the number and core number of the virtual machine CPU'''
    vm_obj = vminstance
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    if cpu_number is not None:
        spec.set_element_numCPUs(cpu_number)  # This sets CPU config to 2x1 (The two cpus are single-core)
    if cpu_core is not None:
        spec.set_element_numCoresPerSocket(cpu_core)  # This sets CPU config to 1x2 (instead of 2x2) 1 CPU dual-core

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully reconfigured"
        return True
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm: %s" % task.get_error_message()
        return False
    server.disconnect()
Exemplo n.º 3
0
def set_vm_reservation(server, types, vm_name, reservation, level):
    """Reconfigure a VM's CPU count or memory size.

    server:      pysphere VIServer connection.
    types:       'cpu' to set the vCPU count, 'memory' to set memory in MB;
                 any other value submits an empty spec.
    vm_name:     name of the VM to look up.
    reservation: the new value (vCPU count or MB).
    level:       currently unused; kept for interface compatibility.
    Returns True when the reconfigure task succeeds, False otherwise.

    NOTE(review): despite the name, this sets numCPUs/memoryMB (the VM's
    configured size), not a resource-allocation reservation — confirm intent.
    """
    vm_mor = server.get_vm_by_name(vm_name)
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_mor._mor)
    _this.set_attribute_type(vm_mor._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    if types == 'cpu':
        spec.set_element_numCPUs(reservation)
    elif types == 'memory':
        spec.set_element_memoryMB(reservation)

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    # BUG FIX: the original built success/error message strings that were
    # never returned or printed (and carried a pile of commented-out debug
    # prints); only the boolean outcome is meaningful to callers.
    return status == task.STATE_SUCCESS
Exemplo n.º 4
0
def enable_vmi(server, vm_obj):
    """Add a VMI ROM device (key 11000) to *vm_obj* via ReconfigVM_Task.

    server: pysphere VIServer connection.
    vm_obj: VIVirtualMachine to reconfigure.
    Raises HypervisorError if the task ends in any state other than success.
    """
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    device_config_specs = []

    # The VMI ROM device; 11000 is the device key assigned to it here.
    media_device = VirtualMachineVMIROM()
    media_device.set_element_key(11000)
    device_config_spec = spec.new_deviceChange()
    device_config_spec.set_element_operation('add')
    device_config_spec.set_element_device(media_device)
    device_config_specs.append(device_config_spec)

    spec.set_element_deviceChange(device_config_specs)

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status != task.STATE_SUCCESS:
        raise HypervisorError("Guest:update %s" % task.get_error_message())
Exemplo n.º 5
0
def disconnect_nic_from_network(vCenterserver,
                                username,
                                password,
                                datacentername,
                                vmname,
                                dvswitch_uuid,
                                portgroupKey,
                                network_name="VM Network",
                                nic_type="vmxnet3",
                                network_type="standard"):

    con = vs_connect(vCenterserver, username, password)
    vm_obj = con.get_vm_by_name(vmname, datacenter=datacentername)

    #Disconnect 3rd adaptar if its connected to network "VM Network"
    #network_name = "VM Network"
    device_name = "Network adapter 3"

    #Find Virtual Nic device
    net_device = None
    for dev in vmname.properties.config.hardware.device:
        if (dev._type in [
                "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"
        ] and dev.deviceInfo.label == network_name
                and dev.deviceInfo.summary == device_name):
            net_device = dev._obj
            break

    if not net_device:
        s.disconnect()
        raise Exception("The vm seems to lack a Virtual Nic")

    #Disconnect the device
    net_device.Connectable.Connected = True

    #Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vmname._mor)
    _this.set_attribute_type(vmname._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_device(net_device)
    dev_change.set_element_operation("edit")
    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = s._proxy.ReconfigVM_Task(request)._returnval

    #Wait for the task to finish
    task = VITask(ret, s)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm:", task.get_error_message()

    s.disconnect()
Exemplo n.º 6
0
 def perform_reconfig(self, request):
     ret = self.esxi_server._proxy.ReconfigVM_Task(request)._returnval
     task = VITask(ret, self.esxi_server)
     status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
     if status == task.STATE_SUCCESS:
         print "VM successfully reconfigured"
     elif status == task.STATE_ERROR:
         print "Error reconfiguring vm: %s" % task.get_error_message()
Exemplo n.º 7
0
def connect_publicNIC_to_publicNet(vCenterserver, username, password,
                                   datacentername, vm_name, network_name,
                                   netlabel):
    '''Switch an existing NIC (identified by its label) onto another network.

    vCenterserver/username/password: vCenter connection parameters.
    datacentername: datacenter containing the VM.
    vm_name: name of the VIVirtualMachine.
    network_name: target network name.
    netlabel: device label of the NIC to retarget.
    Returns a status string; raises Exception when the VM or NIC is missing.
    '''
    con = vs_connect(vCenterserver, username, password)
    vm_obj = con.get_vm_by_name(vm_name, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM %s not found" % vm_name)

    # Locate the NIC by its device label; if several match, the last wins.
    nic_types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"]
    net_device = None
    for dev in vm_obj.properties.config.hardware.device:
        if dev._type not in nic_types:
            continue
        if hasattr(dev, "deviceInfo") and dev.deviceInfo.label == netlabel:
            net_device = dev._obj
    if not net_device:
        raise Exception("The vm_name seems to lack a Virtual Nic")

    # Standard (non-dVS) backing exposes DeviceName: repoint it there.
    if hasattr(net_device.Backing, "DeviceName"):
        net_device.Connectable.Connected = True
        net_device.Backing.set_element_deviceName(network_name)

    # dVS port backing: just ensure the NIC is connected.
    if hasattr(net_device.Backing, "Port"):
        #TODO convert device baching
        net_device.Connectable.Connected = True

    reconfig_req = VI.ReconfigVM_TaskRequestMsg()
    vm_ref = reconfig_req.new__this(vm_obj._mor)
    vm_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    reconfig_req.set_element__this(vm_ref)
    config_spec = reconfig_req.new_spec()
    change = config_spec.new_deviceChange()
    change.set_element_device(net_device)
    change.set_element_operation("edit")
    config_spec.set_element_deviceChange([change])
    reconfig_req.set_element_spec(config_spec)

    task_ref = con._proxy.ReconfigVM_Task(reconfig_req)._returnval

    # Wait for the reconfigure task to reach a terminal state.
    vi_task = VITask(task_ref, con)
    state = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                    vi_task.STATE_ERROR])
    if state == vi_task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif state == vi_task.STATE_ERROR:
        return "failure reconfiguring vm_name: " + str(
            vi_task.get_error_message())
Exemplo n.º 8
0
def get_file_list(datastore,
                  path="/",
                  case_insensitive=True,
                  folders_first=True,
                  match_patterns=None):
    """List files/folders at *path* on *datastore* via SearchDatastore_Task.

    datastore:        datastore name (matched against the server's list).
    path:             folder path inside the datastore, default "/".
    case_insensitive: pass-through to the search spec.
    folders_first:    sort folders before files in the result.
    match_patterns:   optional list of glob patterns; None means no filter.
    Returns a list of {'path': ...} dicts; raises Exception on task error.
    NOTE(review): relies on a module-level connection `s` — confirm it is
    established before calling.
    """
    # BUG FIX: the original used a mutable default argument ([]); use None
    # and normalize here so calls never share state.
    if match_patterns is None:
        match_patterns = []

    ds = [k for k, v in s.get_datastores().items() if v == datastore][0]
    ds_browser = VIProperty(s, ds).browser._obj

    request = VI.SearchDatastore_TaskRequestMsg()
    _this = request.new__this(ds_browser)
    _this.set_attribute_type(ds_browser.get_attribute_type())
    request.set_element__this(_this)
    request.set_element_datastorePath("[%s] %s" % (datastore, path))

    search_spec = request.new_searchSpec()

    # Ask for every file type the datastore browser understands.
    query = [
        VI.ns0.FloppyImageFileQuery_Def('floppy').pyclass(),
        VI.ns0.FolderFileQuery_Def('folder').pyclass(),
        VI.ns0.IsoImageFileQuery_Def('iso').pyclass(),
        VI.ns0.VmConfigFileQuery_Def('vm').pyclass(),
        VI.ns0.TemplateConfigFileQuery_Def('template').pyclass(),
        VI.ns0.VmDiskFileQuery_Def('vm_disk').pyclass(),
        VI.ns0.VmLogFileQuery_Def('vm_log').pyclass(),
        VI.ns0.VmNvramFileQuery_Def('vm_ram').pyclass(),
        VI.ns0.VmSnapshotFileQuery_Def('vm_snapshot').pyclass()
    ]
    search_spec.set_element_query(query)
    details = search_spec.new_details()
    details.set_element_fileOwner(True)
    details.set_element_fileSize(True)
    details.set_element_fileType(True)
    details.set_element_modification(True)
    search_spec.set_element_details(details)
    search_spec.set_element_searchCaseInsensitive(case_insensitive)
    search_spec.set_element_sortFoldersFirst(folders_first)
    search_spec.set_element_matchPattern(match_patterns)
    request.set_element_searchSpec(search_spec)
    response = s._proxy.SearchDatastore_Task(request)._returnval
    task = VITask(response, s)
    if task.wait_for_state([task.STATE_ERROR,
                            task.STATE_SUCCESS]) == task.STATE_ERROR:
        raise Exception(task.get_error_message())

    info = task.get_result()

    # A result without a "file" attribute means the folder is empty.
    if not hasattr(info, "file"):
        return []
    return [{'path': fi.path} for fi in info.file]
Exemplo n.º 9
0
def change_dvs_net(server, vm_obj, pg_map):
    """Takes a VIServer and VIVirtualMachine object and reconfigures
    dVS portgroups according to the mappings in the pg_map dict. The
    pg_map dict must contain the source portgroup as key and the
    destination portgroup as value"""
    # Find virtual NIC devices
    if vm_obj:
        net_device = []
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type in ["VirtualE1000", "VirtualE1000e",
                            "VirtualPCNet32", "VirtualVmxnet",
                            "VirtualNmxnet2", "VirtualVmxnet3"]:
                net_device.append(dev)

    # Throw an exception if there is no NIC found
    if len(net_device) == 0:
        raise Exception("The vm seems to lack a Virtual Nic")

    # Use pg_map to set the new Portgroups
    for dev in net_device:
        old_portgroup = dev.backing.port.portgroupKey
        if pg_map.has_key(old_portgroup):
            dev.backing.port._obj.set_element_portgroupKey(
                pg_map[old_portgroup])
            dev.backing.port._obj.set_element_portKey('')

    # Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)

    # Build a list of device change spec objects
    devs_changed = []
    for dev in net_device:
        spec = request.new_spec()
        dev_change = spec.new_deviceChange()
        dev_change.set_element_device(dev._obj)
        dev_change.set_element_operation("edit")
        devs_changed.append(dev_change)

    # Submit the device change list
    spec.set_element_deviceChange(devs_changed)
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, server)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM %s successfully reconfigured" % vm_obj
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm: %s" % vm_obj, task.get_error_message()
    else:
        print "VM %s not found" % vm_obj
Exemplo n.º 10
0
def spawn_esx_vm(con, template, hostname, mac ):
	# Here we fetch the vm by its name #
	template_vm = find_vm(con, template)
	# template_vm = con.get_vm_by_name(template)
	print 'template vm is %s' %template_vm
	print 'new vm is %s' %hostname
	print 'mac is %s' %mac
	print ('Trying to clone %s to VM %s' % (template_vm,hostname))
	print template_vm
	print ('================================================================================')
	# Does the VM already exist? #
	if find_vm(con, hostname):
                print 'ERROR: %s already exists' % hostname
	else:
		clone = template_vm.clone(hostname, True, None, None, None, None, False)
		print ('VM %s created' % (hostname))

	# And now we need to change its MAC address. We expect to find two Vmxnet3 devices#
	interfaces = []
	macs = []
	# Query network interfaces from vCenter and put them into a list called "interfaces"
	for dev in clone.properties.config.hardware.device:
		if dev._type in ["VirtualVmxnet3"]:
			interfaces.append(dev._obj)

	#Put the mac addresses into a list.
	macs.append(mac)
	
	#Cycle through the interfaces.
	for interface, mac in zip(interfaces, macs):
		print interface
		interface.set_element_addressType("Manual")
		interface.set_element_macAddress(mac)

		#Invoke ReconfigVM_Task 
		request = VI.ReconfigVM_TaskRequestMsg()
		_this = request.new__this(clone._mor)
		_this.set_attribute_type(clone._mor.get_attribute_type())
		request.set_element__this(_this)
		spec = request.new_spec()
		dev_change = spec.new_deviceChange()
		dev_change.set_element_device(interface)
		dev_change.set_element_operation("edit")
		spec.set_element_deviceChange([dev_change])
		request.set_element_spec(spec)
		ret = con._proxy.ReconfigVM_Task(request)._returnval

		#Wait for the task to finish 
		task = VITask(ret, con)

		status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
		if status == task.STATE_SUCCESS:
		    print "VM successfully reconfigured"
		elif status == task.STATE_ERROR:
		    print "Error reconfiguring vm:", task.get_error_message()
Exemplo n.º 11
0
def revert_to_snapshot(server, vm_obj, snapshot_obj):
    """Revert the VM to *snapshot_obj*; raise HypervisorError on failure.

    vm_obj is unused here and kept only for a uniform call signature.
    """
    revert_req = VI.RevertToSnapshot_TaskRequestMsg()
    snap_ref = revert_req.new__this(snapshot_obj._mor)
    snap_ref.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    revert_req.set_element__this(snap_ref)

    task_ref = server._proxy.RevertToSnapshot_Task(revert_req)._returnval
    vi_task = VITask(task_ref, server)
    final = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                    vi_task.STATE_ERROR])
    if final != vi_task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:revert %s" % vi_task.get_error_message())
Exemplo n.º 12
0
def revert_to_snapshot(server, vm_obj, snapshot_obj):
    """Revert a VM to the given snapshot via RevertToSnapshot_Task.

    server:       pysphere VIServer connection.
    vm_obj:       unused here; kept for a uniform call signature.
    snapshot_obj: snapshot whose managed object ref is the task target.
    Raises HypervisorError when the task does not finish successfully.
    """
    request = VI.RevertToSnapshot_TaskRequestMsg()
    mor_snap = request.new__this(snapshot_obj._mor)
    mor_snap.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    request.set_element__this(mor_snap)
    ret = server._proxy.RevertToSnapshot_Task(request)._returnval
    task = VITask(ret, server)

    # Block until the task reaches a terminal state.
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status != task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:revert %s" % task.get_error_message())
Exemplo n.º 13
0
def delete_snapshot(server, vm_obj, snapshot_obj, remove_children=False):
    """Remove *snapshot_obj* (optionally with its children) and refresh
    the VM's snapshot list; raise HypervisorError on task failure."""
    remove_req = VI.RemoveSnapshot_TaskRequestMsg()
    snap_ref = remove_req.new__this(snapshot_obj._mor)
    snap_ref.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    remove_req.set_element__this(snap_ref)
    remove_req.set_element_removeChildren(remove_children)

    vi_task = VITask(server._proxy.RemoveSnapshot_Task(remove_req)._returnval,
                     server)
    outcome = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                      vi_task.STATE_ERROR])
    # Refresh the cached list regardless of the task outcome.
    vm_obj.refresh_snapshot_list()
    if outcome != vi_task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:delete %s" % vi_task.get_error_message())
Exemplo n.º 14
0
def delete_snapshot(server, vm_obj, snapshot_obj, remove_children=False):
    """Remove a snapshot via RemoveSnapshot_Task.

    server:          pysphere VIServer connection.
    vm_obj:          VM whose snapshot list is refreshed afterwards.
    snapshot_obj:    snapshot to remove (task target).
    remove_children: also remove the snapshot's children when True.
    Raises HypervisorError when the task does not succeed.
    """
    request = VI.RemoveSnapshot_TaskRequestMsg()
    mor_snap = request.new__this(snapshot_obj._mor)
    mor_snap.set_attribute_type(snapshot_obj._mor.get_attribute_type())
    request.set_element__this(mor_snap)
    request.set_element_removeChildren(remove_children)
    ret = server._proxy.RemoveSnapshot_Task(request)._returnval
    task = VITask(ret, server)

    # The cached snapshot list is refreshed whether or not the task succeeded.
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    vm_obj.refresh_snapshot_list()
    if status != task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:delete %s" % task.get_error_message())
Exemplo n.º 15
0
def remove_vm(vm_name):
    vm = server.get_vm_by_name(vm_name)
    request = VI.Destroy_TaskRequestMsg()
    tiger = request.new__this(vm._mor)
    tiger.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(tiger)
    ret = server._proxy.Destroy_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully deleted from disk"
    elif status == task.STATE_ERROR:
        print "Error removing vm:", task.get_error_message()
Exemplo n.º 16
0
def delete_vm(server, vm_obj):
    """Destroy *vm_obj*; raise HypervisorError if the task fails."""
    destroy_req = VI.Destroy_TaskRequestMsg()
    vm_ref = destroy_req.new__this(vm_obj._mor)
    vm_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    destroy_req.set_element__this(vm_ref)
    task_ref = server._proxy.Destroy_Task(destroy_req)._returnval

    # Block until the destroy task reaches a terminal state.
    vi_task = VITask(task_ref, server)
    outcome = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                      vi_task.STATE_ERROR])
    if outcome != vi_task.STATE_SUCCESS:
        raise HypervisorError("Guest:delete %s" % vi_task.get_error_message())
Exemplo n.º 17
0
def delete_vm(server, vm_obj):
    """Destroy a VM via Destroy_Task.

    server: pysphere VIServer connection.
    vm_obj: VIVirtualMachine to destroy.
    Raises HypervisorError when the task ends in a non-success state.
    """
    # Invoke Destroy_Task
    request = VI.Destroy_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    ret = server._proxy.Destroy_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, server)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status != task.STATE_SUCCESS:
        raise HypervisorError("Guest:delete %s" % task.get_error_message())
Exemplo n.º 18
0
    def delete_vm(self, vm):
        """Destroy *vm* (a wrapper exposing `esxi_vm`) on this server.

        Raises Exception carrying the task's error message on failure.
        """
        destroy_req = VI.Destroy_TaskRequestMsg()

        vm_ref = destroy_req.new__this(vm.esxi_vm._mor)
        vm_ref.set_attribute_type(vm.esxi_vm._mor.get_attribute_type())
        destroy_req.set_element__this(vm_ref)
        task_ref = self.esxi_server._proxy.Destroy_Task(destroy_req)._returnval
        vi_task = VITask(task_ref, self.esxi_server)

        # Wait for a terminal state, then check for errors.
        vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
        if vi_task.get_state() == vi_task.STATE_ERROR:
            raise Exception("Error removing vm:", vi_task.get_error_message())
Exemplo n.º 19
0
    def change_vdisk_size(self, vminstance, vdisk_name, vdisk_size):
        '''修改虚拟磁盘容量大小'''
        vdisk_name_str = vdisk_name.encode(
            'utf-8')  # 对磁盘名称进行解码(网络传输过来的值是unicode编码标准),否则将设置失败
        vm_obj = vminstance
        size_kb = int(vdisk_size) * 1024 * 1024  #GB转换为KB
        sizes = {}
        sizes[vdisk_name_str] = size_kb
        print sizes
        hd_sizes_kb = sizes
        hd_to_modify = []
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type == "VirtualDisk" and dev.deviceInfo.label in hd_sizes_kb:
                dev_obj = dev._obj
                dev_obj.set_element_capacityInKB(
                    hd_sizes_kb[dev.deviceInfo.label])
                hd_to_modify.append(dev_obj)

        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm_obj._mor)
        _this.set_attribute_type(vm_obj._mor.get_attribute_type())
        request.set_element__this(_this)
        spec = request.new_spec()

        # Change the HDs sizes
        dev_changes = []
        for hd in hd_to_modify:
            dev_change = spec.new_deviceChange()
            dev_change.set_element_operation("edit")
            dev_change.set_element_device(hd)
            dev_changes.append(dev_change)
        if dev_changes:
            spec.set_element_deviceChange(dev_changes)

        request.set_element_spec(spec)
        ret = self.s._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        task = VITask(ret, self.s)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            print "VM successfully reconfigured"
            return True
        elif status == task.STATE_ERROR:
            print "Error reconfiguring vm: %s" % task.get_error_message()
            return False
        self.s.disconnect()
Exemplo n.º 20
0
def remove_nic_vm(vCenterserver, username, password, datacentername, vm_name,
                  networklabel):
    """Remove the NIC labeled *networklabel* from the named VM.

    vCenterserver/username/password: vCenter connection parameters.
    datacentername: datacenter containing the VM.
    vm_name: name of the VIVirtualMachine.
    networklabel: device label of the NIC to remove.
    Returns a status string; raises Exception when the VM or NIC is missing.
    """
    con = vs_connect(vCenterserver, username, password)
    net_device = None
    vm_obj = con.get_vm_by_name(vm_name, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM %s not found" % vm_name)

    # Find the NIC device by its label.
    for dev in vm_obj.properties.config.hardware.device:
        if (dev._type in [
                "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"
        ] and hasattr(dev, "backing")
                and dev.deviceInfo.label == networklabel):
            net_device = dev._obj
            break

    if not net_device:
        raise Exception("The vm_name seems to lack a Virtual Nic")

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_operation("remove")
    dev_change.set_element_device(net_device)
    # Submit the device change
    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = con._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, con)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        # BUG FIX: the original called str(vm_obj, error_message) — a
        # TypeError at runtime; format just the task's error message.
        return "failure reconfiguring vm_name: " + str(
            task.get_error_message())
    else:
        return " failure VM not found"
Exemplo n.º 21
0
def delete_vm_by_name(name, server, remove_files=True):
    """Delete VM

    Unregisters a VM and remove it files from the datastore by name.
    @name is the VM name.
    @remove_files - if True (default) will delete VM files from datastore.
    """
    # Import
    from pysphere import VITask
    from pysphere.resources import VimService_services as VI

    try:
        #Get VM
        vm = server.get_vm_by_name(name)

        if remove_files:
            #Invoke Destroy_Task
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.Destroy_Task(request)._returnval
            task = VITask(ret, server)

            #Wait for the task to finish
            status = task.wait_for_state([task.STATE_SUCCESS,
                                          task.STATE_ERROR])
            if status == task.STATE_SUCCESS:
                print "VM successfully unregistered and deleted from datastore"
            elif status == task.STATE_ERROR:
                print "Error removing vm:", task.get_error_message()
        elif not remove_files:
            #Invoke UnregisterVMRequestMsg
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

            print "Done."

    except (VI.ZSI.FaultException), e:
        raise VIApiException(e)
Exemplo n.º 22
0
    def delete_vdisk(self, vminstance, unit_number):
        '''Remove a hard disk from a virtual machine.

        vminstance:  the VM instance to modify.
        unit_number: unit number of the VirtualDisk to remove.
        Returns True when the disk was removed; on task error it prints a
        message and exits the process (see NOTE below).
        Raises Exception when no matching device is found.
        '''
        UNIT_NUMBER = unit_number  # Virtual disk unit number
        vm = vminstance
        # find the device to be removed
        dev = [
            dev for dev in vm.properties.config.hardware.device
            if dev._type == "VirtualDisk" and dev.unitNumber == UNIT_NUMBER
        ]
        if not dev:
            raise Exception("NO DEVICE FOUND")
        dev = dev[0]._obj
        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)

        spec = request.new_spec()
        dc = spec.new_deviceChange()
        dc.Operation = "remove"
        dc.Device = dev

        spec.DeviceChange = [dc]
        request.Spec = spec

        task = self.s._proxy.ReconfigVM_Task(request)._returnval
        vi_task = VITask(task, self.s)

        status = vi_task.wait_for_state(
            [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            print "Error removing hdd from vm:", vi_task.get_error_message()
            # BUG FIX: the `return False` that followed sys.exit(1) was
            # unreachable and has been removed.
            # NOTE(review): exiting the interpreter from a library method is
            # drastic — consider returning False instead of sys.exit(1).
            sys.exit(1)
        else:
            print "Hard drive successfully removed"
            return True
Exemplo n.º 23
0
def add_new_nic(server, datacentername, vm, network_name):
    '''Attach a new PCNet32 NIC, backed by *network_name*, to a VM.

    server: VIServer object.
    datacentername: datacenter name.
    vm: VIVirtualMachine name.
    network_name: network to back the new adapter.
    Returns a status string; raises Exception when the VM is missing.
    '''
    vm_obj = server.get_vm_by_name(vm, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM not found")

    reconfig_req = VI.ReconfigVM_TaskRequestMsg()
    vm_ref = reconfig_req.new__this(vm_obj._mor)
    vm_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    reconfig_req.set_element__this(vm_ref)
    config_spec = reconfig_req.new_spec()

    # Describe the added NIC: PCNet32 with network backing and a
    # generated MAC address.
    change = config_spec.new_deviceChange()
    change.set_element_operation("add")
    nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
    nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
        "nic_backing").pyclass()
    nic_backing.set_element_deviceName(network_name)
    nic_ctlr.set_element_addressType("generated")
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)
    change.set_element_device(nic_ctlr)

    config_spec.set_element_deviceChange([change])
    reconfig_req.set_element_spec(config_spec)
    task_ref = server._proxy.ReconfigVM_Task(reconfig_req)._returnval

    # Wait for the reconfigure task to finish.
    vi_task = VITask(task_ref, server)
    state = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                    vi_task.STATE_ERROR])
    if state == vi_task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif state == vi_task.STATE_ERROR:
        return "failure reconfiguring vm: " + str(vi_task.get_error_message())
Exemplo n.º 24
0
 def reconfigure(self, spec_content, sync=True):
     """Apply *spec_content* as the VM's bootOptions via ReconfigVM_Task.

     spec_content: value set as the spec's bootOptions element.
     sync: when True, block on the task and raise VIException on error;
           when False, return the pending VITask immediately.
     Raises VIApiException when the SOAP call itself faults.
     """
     try:
         request = VI.ReconfigVM_TaskRequestMsg()
         _this = request.new__this(self.vm._mor)
         _this.set_attribute_type(self.vm._mor.get_attribute_type())
         request.set_element__this(_this)
         spec = request.new_spec()
         spec.set_element_bootOptions(spec_content)
         request.set_element_spec(spec)
         task = self.vm._server._proxy.ReconfigVM_Task(request)._returnval
         vi_task = VITask(task, self.vm._server)
         if sync:
             status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                              vi_task.STATE_ERROR])
             if status == vi_task.STATE_ERROR:
                 raise VIException(vi_task.get_error_message(),
                                   FaultTypes.TASK_ERROR)
             return
         return vi_task
     except (VI.ZSI.FaultException), e:
         raise VIApiException(e)
Exemplo n.º 25
0
 def reconfigure(self, spec_content, sync=True):
     try:
         request = VI.ReconfigVM_TaskRequestMsg()
         _this = request.new__this(self.vm._mor)
         _this.set_attribute_type(self.vm._mor.get_attribute_type())
         request.set_element__this(_this)
         spec = request.new_spec()
         spec.set_element_bootOptions(spec_content)
         request.set_element_spec(spec)
         task = self.vm._server._proxy.ReconfigVM_Task(request)._returnval
         vi_task = VITask(task, self.vm._server)
         if sync:
             status = vi_task.wait_for_state(
                 [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
             if status == vi_task.STATE_ERROR:
                 raise VIException(vi_task.get_error_message(),
                                   FaultTypes.TASK_ERROR)
             return
         return vi_task
     except (VI.ZSI.FaultException), e:
         raise VIApiException(e)
Exemplo n.º 26
0
def create_snapshot(server, vm_obj, snapshot_name):
    """Create a snapshot of *vm_obj* named *snapshot_name*.

    A fresh UUID is stored in the snapshot description so the snapshot
    can be located again afterwards via get_snapshot().  Memory dump and
    guest quiescing are both disabled.
    Raises HypervisorError on task failure.
    """
    snapshot_id = str(uuid.uuid4())
    snap_req = VI.CreateSnapshot_TaskRequestMsg()
    vm_ref = snap_req.new__this(vm_obj._mor)
    vm_ref.set_attribute_type(vm_obj._mor.get_attribute_type())
    snap_req.set_element__this(vm_ref)

    snap_req.set_element_name(snapshot_name)
    snap_req.set_element_description(snapshot_id)
    snap_req.set_element_memory(False)
    snap_req.set_element_quiesce(False)

    vi_task = VITask(server._proxy.CreateSnapshot_Task(snap_req)._returnval,
                     server)
    outcome = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                      vi_task.STATE_ERROR])
    if outcome != vi_task.STATE_SUCCESS:
        raise HypervisorError("Snapshot:create %s" % vi_task.get_error_message())

    vm_obj.refresh_snapshot_list()
    return get_snapshot(vm_obj, snapshot_id)
Exemplo n.º 27
0
def apply_changes(vm, cdrom):
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    dev_change = spec.new_deviceChange()
    dev_change.set_element_device(cdrom)
    dev_change.set_element_operation("edit")

    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "%s: successfully reconfigured" % vm.properties.name
    elif status == task.STATE_ERROR:
        print "%s: Error reconfiguring vm"% vm.properties.name
        print "%s" % task.get_error_message()
Exemplo n.º 28
0
def create_snapshot(server, vm_obj, snapshot_name):
    """Take a snapshot of *vm_obj* and return the resulting snapshot object.

    The description field carries a generated UUID which is later used to
    look the snapshot up after the VM's snapshot list is refreshed.
    Raises HypervisorError when the vSphere task fails.
    """
    description = str(uuid.uuid4())

    req = VI.CreateSnapshot_TaskRequestMsg()
    this = req.new__this(vm_obj._mor)
    this.set_attribute_type(vm_obj._mor.get_attribute_type())
    req.set_element__this(this)

    req.set_element_name(snapshot_name)
    req.set_element_description(description)
    req.set_element_memory(False)    # do not include a memory dump
    req.set_element_quiesce(False)   # do not quiesce the guest

    task_mor = server._proxy.CreateSnapshot_Task(req)._returnval
    vi_task = VITask(task_mor, server)
    result = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    if result != vi_task.STATE_SUCCESS:
        raise HypervisorError(
            "Snapshot:create %s" % vi_task.get_error_message())

    vm_obj.refresh_snapshot_list()
    return get_snapshot(vm_obj, description)
Exemplo n.º 29
0
def create_nas_store(host, access_mode, local_path, remote_host, remote_path,
                     username=None, password=None, volume_type='NFS'):

    #access_mode: 'readOnly' or 'readWrite'
    #volume_type: 'CIFS' or 'NFS' (if not set defaults to NFS)
    
    host_properties = VIProperty(server, host)
    
    hds = host_properties.configManager.datastoreSystem._obj

    request = VI.CreateNasDatastoreRequestMsg()
    _this = request.new__this(hds)
    _this.set_attribute_type(hds.get_attribute_type())
    request.set_element__this(_this)
    
    spec = request.new_spec()
    spec.set_element_accessMode(access_mode)
    spec.set_element_localPath(local_path)
    spec.set_element_remoteHost(remote_host)
    spec.set_element_remotePath(remote_path)
    if username:
        spec.set_element_userName(username)
    if password:
        spec.set_element_password(password)
    if volume_type:
        spec.set_element_type(volume_type)
    
    request.set_element_spec(spec)

    ret = server._proxy.CreateNasDatastore(request)._returnval

    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
      print "Successfully mounted NFS share"
    elif status == task.STATE_ERROR:
      print "Error mounting NFS share: %s" % task.get_error_message()
Exemplo n.º 30
0
def change_memory_size(vminstance, memory_gb):
    '''Modify the memory size of the virtual machine'''
    memory_mb = memory_gb * 1024  #GB conversion MB
    vm_obj = vminstance
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    # Memory size
    spec.set_element_memoryMB(memory_mb)

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        print "VM successfully reconfigured"
        return True
    elif status == task.STATE_ERROR:
        print "Error reconfiguring vm: %s" % task.get_error_message()
        return False
    server.disconnect()
Exemplo n.º 31
0
def add_nic_vm_and_connect_to_net(vCenterserver,
                                  username,
                                  password,
                                  datacentername,
                                  vm,
                                  dvswitch_uuid,
                                  portgroupKey,
                                  network_name="VM Network",
                                  nic_type="vmxnet3",
                                  network_type="standard"):
    """Add a virtual NIC to *vm* and attach it to a network.

    @nic_type: one of e1000, e1000e, pcnet32, vmxnet, vmxnet2, vmxnet3
    @network_type: "standard" (plain portgroup named *network_name*) or
        "dvs" (distributed vSwitch identified by dvswitch_uuid/portgroupKey)
    @return: a status string describing success or failure.
    Raises Exception if the VM cannot be found.
    """
    if not network_name:
        return "failure reconfiguring vm network_name is mandatory"

    con = vs_connect(vCenterserver, username, password)
    vm_obj = con.get_vm_by_name(vm, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM %s not found" % vm)

    # NOTE: the original scanned existing NIC devices into net_device but
    # never used the result (it was immediately overwritten); that dead
    # code has been removed.

    # Map the user-facing nic_type to the corresponding VI device class.
    nic_classes = {
        "e1000": VI.ns0.VirtualE1000_Def,
        "e1000e": VI.ns0.VirtualE1000e_Def,
        "pcnet32": VI.ns0.VirtualPCNet32_Def,
        "vmxnet": VI.ns0.VirtualVmxnet_Def,
        "vmxnet2": VI.ns0.VirtualVmxnet2_Def,
        "vmxnet3": VI.ns0.VirtualVmxnet3_Def,
    }
    if nic_type not in nic_classes:
        # Previously an unsupported type crashed with a NameError on
        # nic_ctlr; fail with an explicit status string instead.
        return "failure reconfiguring vm: unsupported nic_type %s" % nic_type
    nic_ctlr = nic_classes[nic_type]("nic_ctlr").pyclass()

    if network_type == "standard":
        # Standard vSwitch: the backing is simply the portgroup name.
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
            "nic_backing").pyclass()
        nic_backing.set_element_deviceName(network_name)
    elif network_type == "dvs":
        nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
            "nic_backing_port").pyclass()
        nic_backing_port.set_element_switchUuid(dvswitch_uuid)
        nic_backing_port.set_element_portgroupKey(portgroupKey)
        # http://www.vmware.com/support/developer/vc-sdk/visdk400pubs/ReferenceGuide/vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo.html
        nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
            "nic_backing").pyclass()
        nic_backing.set_element_port(nic_backing_port)
    else:
        # Previously an unsupported type crashed with a NameError on
        # nic_backing; fail with an explicit status string instead.
        return ("failure reconfiguring vm: unsupported network_type %s"
                % network_type)

    nic_ctlr.set_element_addressType("generated")
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()
    dev_change = spec.new_deviceChange()
    dev_change.set_element_operation("add")
    dev_change.set_element_device(nic_ctlr)
    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)

    ret = con._proxy.ReconfigVM_Task(request)._returnval
    # Wait for the task to finish
    task = VITask(ret, con)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        return "failure reconfiguring vm: " + str(task.get_error_message())
    def clone(self,
              name,
              sync_run=True,
              folder=None,
              resourcepool=None,
              datastore=None,
              host=None,
              power_on=True,
              template=False,
              snapshot=None,
              linked=False,
              customize=None,
              data=None):
        """Clones this Virtual Machine
        @name: name of the new virtual machine
        @sync_run: if True (default) waits for the task to finish, and returns
            a VIVirtualMachine instance with the new VM (raises an exception if
            the task didn't succeed). If sync_run is set to False the task is
            started and a VITask instance is returned
        @folder: name of the folder that will contain the new VM, if not set
            the vm will be added to the folder the original VM belongs to
        @resourcepool: MOR of the resourcepool to be used for the new vm.
            If not set, it uses the same resourcepool than the original vm.
        @datastore: MOR of the datastore where the virtual machine
            should be located. If not specified, the current datastore is used.
        @host: MOR of the host where the virtual machine should be registered.
            IF not specified:
              * if resourcepool is not specified, current host is used.
              * if resourcepool is specified, and the target pool represents a
                stand-alone host, the host is used.
              * if resourcepool is specified, and the target pool represents a
                DRS-enabled cluster, a host selected by DRS is used.
              * if resource pool is specified and the target pool represents a
                cluster without DRS enabled, an InvalidArgument exception be
                thrown.
        @power_on: If the new VM will be powered on after being created. If
            template is set to True, this parameter is ignored.
        @template: Specifies whether or not the new virtual machine should be
            marked as a template.
        @snapshot: Snaphot MOR, or VISnaphost object, or snapshot name (if a
            name is given, then the first matching occurrence will be used).
            Is the snapshot reference from which to base the clone. If this
            parameter is set, the clone is based off of the snapshot point. This
            means that the newly created virtual machine will have the same
            configuration as the virtual machine at the time the snapshot was
            taken. If this parameter is not set then the clone is based off of
            the virtual machine's current configuration.
        @linked: If True (requires @snapshot to be set) creates a new child disk
            backing on the destination datastore. None of the virtual disk's
            existing files should be moved from their current locations.
            Note that in the case of a clone operation, this means that the
            original virtual machine's disks are now all being shared. This is
            only safe if the clone was taken from a snapshot point, because
            snapshot points are always read-only. Thus for a clone this option
            is only valid when cloning from a snapshot
        @customize: guest customization flavor, one of "SYSPREP",
            "SYSPREPTEXT" or "LINUX" (ignored when @template is True);
            requires @data.
        @data: dict of customization values read by the chosen flavor
            (e.g. ip, netmask, gateway, adminpw, timezone, fullName,
            orgName, joinDomain*, value, domain).
        """
        try:
            # Destination folder: either the one named by @folder, or the
            # folder that currently contains this VM when @folder is None.
            folders = self._server._retrieve_properties_traversal(
                property_names=['name', 'childEntity'],
                obj_type=MORTypes.Folder)
            folder_mor = None
            for f in folders:
                fname = ""
                children = []
                for prop in f.PropSet:
                    if prop.Name == "name":
                        fname = prop.Val
                    elif prop.Name == "childEntity":
                        children = prop.Val.ManagedObjectReference
                if folder == fname or (not folder and self._mor in children):
                    folder_mor = f.Obj
                    break
            if not folder_mor and folder:
                raise VIException("Couldn't find folder %s" % folder,
                                  FaultTypes.OBJECT_NOT_FOUND)
            elif not folder_mor:
                raise VIException("Error locating current VM folder",
                                  FaultTypes.OBJECT_NOT_FOUND)

            # Assemble the CloneVM_Task request.
            request = VI.CloneVM_TaskRequestMsg()
            _this = request.new__this(self._mor)
            _this.set_attribute_type(self._mor.get_attribute_type())
            request.set_element__this(_this)
            request.set_element_folder(folder_mor)
            request.set_element_name(name)
            spec = request.new_spec()
            # Templates are never powered on; otherwise honor @power_on.
            if template:
                spec.set_element_powerOn(False)
            else:
                spec.set_element_powerOn(power_on)
            # Placement: pool/datastore/host are optional overrides; plain
            # values are coerced into MORs of the appropriate type.
            location = spec.new_location()
            if resourcepool:
                if not VIMor.is_mor(resourcepool):
                    resourcepool = VIMor(resourcepool, MORTypes.ResourcePool)
                pool = location.new_pool(resourcepool)
                pool.set_attribute_type(resourcepool.get_attribute_type())
                location.set_element_pool(pool)
            if datastore:
                if not VIMor.is_mor(datastore):
                    datastore = VIMor(datastore, MORTypes.Datastore)
                ds = location.new_datastore(datastore)
                ds.set_attribute_type(datastore.get_attribute_type())
                location.set_element_datastore(ds)
            if host:
                if not VIMor.is_mor(host):
                    host = VIMor(host, MORTypes.HostSystem)
                hs = location.new_host(host)
                hs.set_attribute_type(host.get_attribute_type())
                location.set_element_host(hs)
            if snapshot:
                # Resolve @snapshot (MOR, VISnapshot, or name) to a MOR.
                sn_mor = None
                if VIMor.is_mor(snapshot):
                    sn_mor = snapshot
                elif isinstance(snapshot, VISnapshot):
                    sn_mor = snapshot._mor
                elif isinstance(snapshot, basestring):
                    for sn in self.get_snapshots():
                        if sn.get_name() == snapshot:
                            sn_mor = sn._mor
                            break
                if not sn_mor:
                    raise VIException(
                        "Could not find snapshot '%s'" % snapshot,
                        FaultTypes.OBJECT_NOT_FOUND)
                snapshot = spec.new_snapshot(sn_mor)
                snapshot.set_attribute_type(sn_mor.get_attribute_type())
                spec.set_element_snapshot(snapshot)

            if linked and snapshot:
                # Linked clone: share the parent's read-only snapshot disks.
                location.set_element_diskMoveType("createNewChildDiskBacking")

            if not template and customize:
                if data is None:
                    raise VIApiException(
                        "Cannot use Customization without data")

                # Guest OS customization: one NIC with a fixed IP, plus an
                # OS-specific identity built below from @customize.
                customization = spec.new_customization()
                spec.set_element_customization(customization)

                globalIPSettings = customization.new_globalIPSettings()
                customization.set_element_globalIPSettings(globalIPSettings)

                # nicSettingMap
                nicSetting = customization.new_nicSettingMap()
                adapter = nicSetting.new_adapter()
                nicSetting.set_element_adapter(adapter)

                ipAddress = data.get('ip')
                netmask = data.get('netmask')
                gateway = data.get('gateway')

                fixedip = VI.ns0.CustomizationFixedIp_Def(
                    "ipAddress").pyclass()
                fixedip.set_element_ipAddress(ipAddress)

                #dhcp = VI.ns0.CustomizationDhcpIpGenerator_Def("ip").pyclass()
                adapter.set_element_ip(fixedip)
                adapter.set_element_subnetMask(netmask)
                #help(adapter.set_element_gateway([gateway,]))
                adapter.set_element_gateway([
                    gateway,
                ])

                nicSetting.set_element_adapter(adapter)
                customization.set_element_nicSettingMap([
                    nicSetting,
                ])

                if customize == "SYSPREP":
                    # Windows customization via sysprep: auto-logon once with
                    # the given admin password, set identity, optionally join
                    # a domain.
                    identity = VI.ns0.CustomizationSysprep_Def(
                        "identity").pyclass()
                    customization.set_element_identity(identity)

                    guiUnattended = identity.new_guiUnattended()
                    guiUnattended.set_element_autoLogon(True)
                    guiUnattended.set_element_autoLogonCount(1)

                    passw = guiUnattended.new_password()
                    guiUnattended.set_element_password(passw)
                    passw.set_element_value(data["adminpw"])
                    passw.set_element_plainText(True)

                    # http://msdn.microsoft.com/en-us/library/ms912391(v=winembedded.11).aspx
                    # 85 is GMT Standard Time
                    timeZone = data.get("timezone", 85)
                    guiUnattended.set_element_timeZone(timeZone)
                    identity.set_element_guiUnattended(guiUnattended)

                    userData = identity.new_userData()
                    userData.set_element_fullName(
                        data.get("fullName", "PyShere"))
                    userData.set_element_orgName(
                        data.get("orgName", "PySphere"))
                    userData.set_element_productId("")
                    # Underscores are stripped: not valid in Windows hostnames.
                    computerName = VI.ns0.CustomizationFixedName_Def(
                        "computerName").pyclass()
                    computerName.set_element_name(name.replace("_", ""))
                    userData.set_element_computerName(computerName)
                    identity.set_element_userData(userData)

                    identification = identity.new_identification()

                    if data.get("joinDomain", False):
                        # join the domain
                        identification.set_element_domainAdmin(
                            data["domainAdmin"])
                        domainAdminPassword = identification.new_domainAdminPassword(
                        )
                        domainAdminPassword.set_element_plainText(True)
                        domainAdminPassword.set_element_value(
                            data["domainAdminPassword"])
                        identification.set_element_domainAdminPassword(
                            domainAdminPassword)
                        identification.set_element_joinDomain(
                            data["joinDomain"])
                        identity.set_element_identification(identification)
                elif customize == "SYSPREPTEXT":
                    # Raw sysprep answer file supplied verbatim in data["value"].
                    identity = VI.ns0.CustomizationSysprepText_Def(
                        "identity").pyclass()
                    customization.set_element_identity(identity)
                    identity.set_element_value(data["value"])

                elif customize == "LINUX":
                    # Linux customization: domain plus fixed hostname.
                    identity = VI.ns0.CustomizationLinuxPrep_Def(
                        "identity").pyclass()
                    customization.set_element_identity(identity)
                    identity.set_element_domain(data["domain"])
                    hostName = VI.ns0.CustomizationFixedName_Def(
                        "hostName").pyclass()
                    hostName.set_element_name(name.replace("_", ""))
                    identity.set_element_hostName(hostName)

            spec.set_element_location(location)
            spec.set_element_template(template)
            request.set_element_spec(spec)
            task = self._server._proxy.CloneVM_Task(request)._returnval
            vi_task = VITask(task, self._server)
            if sync_run:
                status = vi_task.wait_for_state(
                    [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
                if status == vi_task.STATE_ERROR:
                    raise VIException(vi_task.get_error_message(),
                                      FaultTypes.TASK_ERROR)
                # NOTE(review): 'mianbao' appears to wrap the cloned VM's MOR
                # into a VM object (cf. the documented VIVirtualMachine
                # return) -- confirm against its definition.
                return mianbao(self._server, vi_task.get_result()._obj)

            return vi_task

        except (VI.ZSI.FaultException), e:
            raise VIApiException(e)
Exemplo n.º 33
0
      # NOTE(review): spliced script fragment -- the enclosing function/branch
      # that owns this indented block is outside the visible chunk.
      request = VI.ReconfigVM_TaskRequestMsg()
      _this = request.new__this(vm._mor)
      _this.set_attribute_type(vm._mor.get_attribute_type())
      request.set_element__this(_this)
      spec = request.new_spec()
      dev_change = spec.new_deviceChange()
      # Push the already-modified device object "d" back as an in-place edit.
      dev_change.set_element_device(d)
      dev_change.set_element_operation("edit")
      spec.set_element_deviceChange([dev_change])
      request.set_element_spec(spec)
      ret = server._proxy.ReconfigVM_Task(request)._returnval

      task = VITask(ret, server)
      status = task.wait_for_state([task.STATE_SUCCESS,task.STATE_ERROR])
      if status == task.STATE_SUCCESS:
        print "%s: successfully reconfigured" % vm.properties.name
      elif status == task.STATE_ERROR:
        print "%s: Error reconfiguring vm" % vm.properties.name
        print "%s" % task.get_error_message()
# NOTE(review): the matching "if options.device == ..." branch precedes this
# chunk and is not visible here.
elif options.device == "host":
  #HOST DEVICE: hardcoded to use the first valid host device
  valid_host_device = get_valid_host_devices(vm)[0]
  change_cdrom_type(cdrom, "HOST DEVICE", valid_host_device)
elif options.device == "client":
  #CLIENT DEVICE
  change_cdrom_type(cdrom, "CLIENT DEVICE")

apply_changes(vm, cdrom)

server.disconnect()
Exemplo n.º 34
0
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)
    dev_change.set_element_device(nic_ctlr)

    spec.set_element_deviceChange([dev_change])
    request.set_element_spec(spec)

    task = server._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, server)

    # Wait for task to finish
    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

    if status == vi_task.STATE_ERROR:
        module.fail_json(msg=vi_task.get_error_message())

    data = vm.get_properties(from_cache=False)
    facts = {
        'module_hw': True,
        'hw_name': vm.properties.name,
        'hw_guest_full_name': vm.properties.config.guestFullName,
        'hw_guest_id': vm.properties.config.guestId,
        'hw_product_uuid': vm.properties.config.uuid,
        'hw_processor_count': vm.properties.config.hardware.numCPU,
        'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
    }

    ifidx = 0
    for entry in vm.properties.config.hardware.device:
Exemplo n.º 35
0
# NOTE(review): spliced script fragment -- "spec", "request", "server" and
# "opt_net" are created earlier, outside the visible chunk.
# Build an "add NIC" device-change entry (VMXNET3 on a standard portgroup).
dev_change = spec.new_deviceChange()
dev_change.set_element_operation("add")

# We use a VMXNET3 controller here.  Introspect into
# VI.ns0 for all available controller names.
nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()

nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
    "nic_backing").pyclass()
nic_backing.set_element_deviceName(opt_net)
nic_ctlr.set_element_addressType("generated")  # MAC auto-assigned by vSphere
nic_ctlr.set_element_backing(nic_backing)
nic_ctlr.set_element_key(4)
dev_change.set_element_device(nic_ctlr)

spec.set_element_deviceChange([dev_change])
request.set_element_spec(spec)

task = server._proxy.ReconfigVM_Task(request)._returnval
vi_task = VITask(task, server)

# Wait for task to finish
status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

if status == vi_task.STATE_ERROR:
    print "ERROR CONFIGURING VM:", vi_task.get_error_message()
else:
    print "VM CONFIGURED SUCCESSFULLY"

server.disconnect()
# NOTE(review): spliced fragment of a CloneVM_Task script; the conditional
# that owns the next two indented lines (and the setup of request/spec/
# location/rp_mor/host_mor) is outside the visible chunk.
    pool.set_attribute_type(rp_mor.get_attribute_type())
    location.set_element_pool(pool)

# Optional host placement override.
if host_mor:
    hs = location.new_host(host_mor)
    hs.set_attribute_type(host_mor.get_attribute_type())
    location.set_element_host(hs)

spec.set_element_location(location)

# powerOn: leave the clone powered off
spec.set_element_powerOn(False)

# template: create a normal VM, not a template
spec.set_element_template(False)

# Set the 'spec' element of the request
request.set_element_spec(spec)

# Execute the request
task = server._proxy.CloneVM_Task(request)._returnval

vi_task = VITask(task, server)

status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

if status == vi_task.STATE_ERROR:
    print('Failed')

    print(vi_task.get_error_message())
Exemplo n.º 37
0
def requestNewSpec(vmname):
    statusdict = {}

    try:
        vm_obj = server.get_vm_by_name(vmname)
    except VIException as inst:
        statusdict["error"] = str(inst)
        print json.dumps(statusdict)
        sys.exit()

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    if args.memory:
        memory_mb = int(args.memory[0])
        #set the new RAM size
        spec.set_element_memoryMB(memory_mb)

    if args.disk:
        hd_name = str(args.disk[1])
        hd_sizes_kb = {hd_name: int(args.disk[0]) * 1024 * 1024}  #GB
        hd_to_modify = []
        for dev in vm_obj.properties.config.hardware.device:
            if dev._type == "VirtualDisk" and dev.deviceInfo.label in hd_sizes_kb:
                dev_obj = dev._obj
                dev_obj.set_element_capacityInKB(
                    hd_sizes_kb[dev.deviceInfo.label])
                hd_to_modify.append(dev_obj)
        #Change the HDs sizes
        dev_changes = []
        for hd in hd_to_modify:
            dev_change = spec.new_deviceChange()
            dev_change.set_element_operation("edit")
            dev_change.set_element_device(hd)
            dev_changes.append(dev_change)
        if dev_changes:
            spec.set_element_deviceChange(dev_changes)

    if args.cpu:
        numcpu = int(args.cpu[0])
        numcorespersocket = int(args.cpu[1])
        temp = float(numcpu) / float(numcorespersocket)
        temp = (temp % 1)
        if temp == float(0):
            if (numcpu / numcorespersocket) > 8:
                statusdict["cpu"] = "Only 8 socket for a VM is supported"
                print json.dumps(statusdict)
                sys.exit()
            else:
                spec.set_element_numCPUs(numcpu)
                spec.set_element_numCoresPerSocket(numcorespersocket)
        else:
            statusdict[
                "cpu"] = "No of CPU divided by No of Cores per Socket should always be a Integer"
            print json.dumps(statusdict)
            sys.exit()

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    #Wait for the task to finish
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        #print "VM successfully reconfigured"
        statusdict[vmname] = "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        #print "Error reconfiguring vm: %s" % task.get_error_message()
        errormsg = str(task.get_error_message())
        statusdict["error"] = errormsg

    print json.dumps(statusdict)
Exemplo n.º 38
0
def update_vm(server, vm_obj, guestdata):
    """Reconfigure *vm_obj* in a single ReconfigVM_Task built from *guestdata*.

    Recognized guestdata keys: "name", "memory" (MB), "cpus", "hdd" (GB),
    "iso" (dict with "name": ISO path; empty/missing name disconnects the
    CD device), "tags" (list joined into the annotation), "paravirtualized"
    (bool: add or remove the VMI ROM device).
    Raises HypervisorError if the task does not finish successfully.
    """
    new_annotation = guestdata.get("tags")
    new_cpus = guestdata.get("cpus")
    new_hdd = guestdata.get("hdd")
    new_iso = guestdata.get("iso")
    new_memory = guestdata.get("memory")
    new_name = guestdata.get("name")
    # NOTE(review): this local shadows the module-level enable_vmi() helper.
    enable_vmi = guestdata.get("paravirtualized")

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)
    spec = request.new_spec()

    # Only set fields that actually differ from the current config, so the
    # task is minimal.
    if new_name and vm_obj.properties.config.name != new_name:
        spec.set_element_name(new_name)

    if new_memory and vm_obj.properties.config.hardware.memoryMB != new_memory:
        # set the new RAM size
        spec.set_element_memoryMB(new_memory)

    if new_cpus and vm_obj.properties.config.hardware.numCPU != new_cpus:
        # set the new Cpu count
        spec.set_element_numCPUs(new_cpus)

    device_config_specs = []
    if new_hdd:
        # Grow-only: extend the last disk so total capacity reaches new_hdd.
        # new_hdd * 1024 * 1024 converts GB to KB (assumes get_disk_size
        # returns KB -- TODO confirm); the "_GB" suffix below is misleading,
        # the value is in KB.
        disk_size = get_disk_size(vm_obj)
        if new_hdd * 1024 * 1024 > disk_size:
            disk = get_disks(vm_obj)[-1]
            hdd_in_GB = new_hdd * 1024 * 1024
            new_disk_size = hdd_in_GB - disk_size + disk.capacityInKB

            device_config_spec = spec.new_deviceChange()
            device_config_spec.set_element_operation('edit')
            disk._obj.set_element_capacityInKB(new_disk_size)
            device_config_spec.set_element_device(disk._obj)
            device_config_specs.append(device_config_spec)

    if new_iso:
        # Mount the named ISO (connected now and at power-on), or detach the
        # media when no name is given.  Guest control is disabled either way.
        media_device = get_cd(vm_obj)
        connectable = media_device._obj.new_connectable()
        connectable.set_element_allowGuestControl(False)

        if new_iso.get("name") and new_iso["name"] != "":
            connectable.set_element_connected(True)
            connectable.set_element_startConnected(True)
            media_device._obj.set_element_connectable(connectable)

            backing = VCdIsoBackingInfo()
            backing.set_element_fileName(new_iso["name"])
            media_device._obj.set_element_backing(backing)
        else:
            connectable.set_element_connected(False)
            connectable.set_element_startConnected(False)
            media_device._obj.set_element_connectable(connectable)

        device_config_spec = spec.new_deviceChange()
        device_config_spec.set_element_operation('edit')
        device_config_spec.set_element_device(media_device._obj)
        device_config_specs.append(device_config_spec)

    if enable_vmi is not None:
        # Add the VMI paravirtualization ROM when requested and absent, or
        # remove it when present but no longer wanted.
        vmi_driver = get_vmi_driver(vm_obj)
        if enable_vmi and not vmi_driver:
            vmi_driver = VirtualMachineVMIROM()
            vmi_driver.set_element_key(11000)

            device_config_spec = spec.new_deviceChange()
            device_config_spec.set_element_operation('add')
            device_config_spec.set_element_device(vmi_driver)
            device_config_specs.append(device_config_spec)
        elif not enable_vmi and vmi_driver:
            device_config_spec = spec.new_deviceChange()
            device_config_spec.set_element_operation('remove')
            device_config_spec.set_element_device(vmi_driver._obj)
            device_config_specs.append(device_config_spec)

    if len(device_config_specs) != 0:
        spec.set_element_deviceChange(device_config_specs)

    if new_annotation:
        spec.set_element_annotation("\n".join(new_annotation))

    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, server)
    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status != task.STATE_SUCCESS:
        raise HypervisorError("Guest:update %s" % task.get_error_message())
Exemplo n.º 39
0
# NOTE(review): spliced fragment of an "add disk" script; the start of this
# if/elif chain and the setup of backing/hd/dc/spec/request/s/statusdict/
# VM_NAME are outside the visible chunk.  args.adddisk[2]/[3] select the
# provisioning style: thick+eager, thick+lazy, or thin.
    backing.ThinProvisioned = False  #if True - thin disk else thick disk
    backing.EagerlyScrub = True
elif args.adddisk[2] == "thick" and args.adddisk[3] == "lazy":
    backing.ThinProvisioned = False  #if True - thin disk else thick disk
    backing.EagerlyScrub = False
elif args.adddisk[2] == "thin" and args.adddisk[3] == "None":
    backing.ThinProvisioned = True  #if True - thin disk else thick disk
    backing.EagerlyScrub = False
hd.Backing = backing

dc.Device = hd

spec.DeviceChange = [dc]
request.Spec = spec

task = s._proxy.ReconfigVM_Task(request)._returnval
vi_task = VITask(task, s)

# Wait for task to finish
status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
if status == vi_task.STATE_ERROR:
    #print "ERROR CONFIGURING VM:", vi_task.get_error_message()
    statusdict["ERROR"] = str(vi_task.get_error_message())
else:
    #print "VM CONFIGURED SUCCESSFULLY"
    statusdict[VM_NAME] = "SUCCESS"

# Report the outcome as JSON and close the connection.
print json.dumps(statusdict)

s.disconnect()
Exemplo n.º 40
0
request.set_element__this(_this)

# Select the 'com.vmware.guestInfo' transport for the OVF environment
# (vApp options) and submit the reconfigure request.
spec = request.new_spec()
vappconfig = spec.new_vAppConfig()
vappconfig.set_element_ovfEnvironmentTransport(['com.vmware.guestInfo'])

spec.set_element_vAppConfig(vappconfig)

request.set_element_spec(spec)
task = server._proxy.ReconfigVM_Task(request)._returnval
vi_task = VITask(task, server)

# Block until the reconfigure task completes (success or error).
status = vi_task.wait_for_state([vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

if status == vi_task.STATE_ERROR:
    print "%s: Error enabling vApp options:" % options.vmName, vi_task.get_error_message(
    )
else:
    print "%s: VM vApp Options enabled" % options.vmName

#
# This section changes vApp properties
#

newconfig = {
    'add': [{
        'key': 11,
        'id': "custom_ip",
        'value': "",
        'category': 'network_config'
    }, {
        'key': 12,
Exemplo n.º 41
0
def change_dvs_net(server, datacentername, vm, pg_map):
    """Reconfigure a VM's dVS portgroups according to the mappings in pg_map.

    server: VIServer object
    datacentername: datacenter name
    vm: virtual machine name
    pg_map: dict mapping source portgroup key -> destination portgroup key

    Returns a status string describing the outcome of the reconfigure task.
    Raises Exception if the VM cannot be found or has no virtual NIC.
    """
    vm_obj = server.get_vm_by_name(vm, datacenter=datacentername)
    if not vm_obj:
        raise Exception("VM %s not found" % vm)

    # Find virtual NIC devices (the redundant re-check of vm_obj after the
    # raise above has been dropped).
    net_device = []
    for dev in vm_obj.properties.config.hardware.device:
        if dev._type in [
                "VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"
        ]:
            net_device.append(dev)

    # Throw an exception if there is no NIC found
    if len(net_device) == 0:
        raise Exception("The vm seems to lack a Virtual Nic")

    # Use pg_map to set the new Portgroups; clearing portKey lets the dVS
    # assign a free port in the destination portgroup.
    for dev in net_device:
        old_portgroup = dev.Backing.Port.PortgroupKey
        if old_portgroup in pg_map:  # 'in' instead of Py2-only has_key()
            dev.backing.port._obj.set_element_portgroupKey(
                pg_map[old_portgroup])
            dev.backing.port._obj.set_element_portKey('')

    # Invoke ReconfigVM_Task
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
    request.set_element__this(_this)

    # Build a list of device change spec objects on a single spec.  The
    # original created a fresh spec on every loop iteration and submitted
    # only the last one; the spec is now created once, outside the loop.
    spec = request.new_spec()
    devs_changed = []
    for dev in net_device:
        dev_change = spec.new_deviceChange()
        dev_change.set_element_device(dev._obj)
        dev_change.set_element_operation("edit")
        devs_changed.append(dev_change)

    # Submit the device change list
    spec.set_element_deviceChange(devs_changed)
    request.set_element_spec(spec)
    ret = server._proxy.ReconfigVM_Task(request)._returnval

    # Wait for the task to finish
    task = VITask(ret, server)

    status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if status == task.STATE_SUCCESS:
        return "VM successfully reconfigured"
    elif status == task.STATE_ERROR:
        return "failure reconfiguring vm: " + str(task.get_error_message())
    else:
        return " failure VM not found"
Exemplo n.º 42
0
# Connection settings; USER/PASSWORD appear to be redacted placeholders.
HOST = "my esxi server"
USER = "******"
PASSWORD = "******"
DATASTORE = "datastore1"
VM_NAME = "vm-test-name"

s = VIServer()
s.connect(HOST, USER, PASSWORD)

#Get VM
vm = s.get_vm_by_name(VM_NAME)

#Invoke Destroy_Task to delete the VM (and its files) from disk
request = VI.Destroy_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
ret = s._proxy.Destroy_Task(request)._returnval

#Wait for the task to finish
task = VITask(ret, s)

status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
    print "VM successfully deleted from disk"
elif status == task.STATE_ERROR:
    print "Error removing vm:", task.get_error_message()

#Disconnect from the server
s.disconnect()
Exemplo n.º 43
0
# Reconfigure an existing VM's RAM size and vCPU count, then wait for the task.
memory_mb = 512

vm_obj = server.get_vm_by_name("DEV_VM.clone")

request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm_obj._mor)
_this.set_attribute_type(vm_obj._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()

#set the new RAM size
spec.set_element_memoryMB(memory_mb)

#set the no of CPU and socket
spec.set_element_numCPUs(2)
#spec.set_element_numCoresPerSocket(2)

request.set_element_spec(spec)
ret = server._proxy.ReconfigVM_Task(request)._returnval

#Wait for the task to finish
task = VITask(ret, server)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
    print "VM successfully reconfigured"
elif status == task.STATE_ERROR:
    print "Error reconfiguring vm: %s" % task.get_error_message()

server.disconnect()
Exemplo n.º 44
0
    def _create_vm(self, volume_name, vm_name, vm_description, mem_megs,
        cpu_count, guest_os_id, disk_size, network_name, vm_folder, resource_pool, host):
        """Create a new VM on the ESXi host via CreateVM_Task.

        Builds a ConfigSpec containing an LSI Logic SCSI controller, one
        thin-provisioned flat-file disk and one E1000 NIC backed by
        network_name, places the VM under vm_folder / resource_pool / host,
        submits the task and waits for it to finish.

        Raises Exception if the task ends in the error state.
        """
        req = VI.CreateVM_TaskRequestMsg()
        cfg = req.new_config()

        # Basic VM identity and sizing.
        files = cfg.new_files()
        files.set_element_vmPathName(volume_name)
        cfg.set_element_files(files)
        cfg.set_element_name(vm_name)
        cfg.set_element_annotation(vm_description)
        cfg.set_element_memoryMB(mem_megs)
        cfg.set_element_numCPUs(cpu_count)
        cfg.set_element_guestId(guest_os_id)

        device_changes = []

        # SCSI controller the disk will attach to.
        controller_key = 1
        controller_spec = cfg.new_deviceChange()
        controller_spec.set_element_operation('add')
        controller = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
        controller.set_element_busNumber(0)
        controller.set_element_key(controller_key)
        controller.set_element_sharedBus("noSharing")
        controller_spec.set_element_device(controller)
        device_changes.append(controller_spec)

        # File-backed virtual disk, created together with the VM.
        disk_spec = cfg.new_deviceChange()
        disk_spec.set_element_fileOperation("create")
        disk_spec.set_element_operation("add")
        disk = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
        disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("disk_backing").pyclass()
        disk_backing.set_element_fileName(volume_name)
        disk_backing.set_element_diskMode("persistent")
        disk_backing.set_element_thinProvisioned(True)
        disk.set_element_key(0)
        disk.set_element_controllerKey(controller_key)
        disk.set_element_unitNumber(0)
        disk.set_element_backing(disk_backing)
        disk.set_element_capacityInKB(disk_size)
        disk_spec.set_element_device(disk)
        device_changes.append(disk_spec)

        # NIC: the network name must be set as the backing device name.
        nic_spec = cfg.new_deviceChange()
        nic_spec.set_element_operation("add")
        nic = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
        nic_backing.set_element_deviceName(network_name)
        nic.set_element_addressType("generated")
        nic.set_element_backing(nic_backing)
        # NOTE(review): this key duplicates the SCSI controller's key (1) —
        # confirm against the vSphere API whether a distinct key is required.
        nic.set_element_key(1)
        nic_spec.set_element_device(nic)
        device_changes.append(nic_spec)

        cfg.set_element_deviceChange(device_changes)
        req.set_element_config(cfg)

        # Placement: target folder, resource pool and host.
        folder_ref = req.new__this(vm_folder)
        folder_ref.set_attribute_type(vm_folder.get_attribute_type())
        req.set_element__this(folder_ref)
        pool_ref = req.new_pool(resource_pool)
        pool_ref.set_attribute_type(resource_pool.get_attribute_type())
        req.set_element_pool(pool_ref)
        host_ref = req.new_host(host)
        host_ref.set_attribute_type(host.get_attribute_type())
        req.set_element_host(host_ref)

        # Submit the task and wait for it to finish.
        task_mor = self.esxi_server._proxy.CreateVM_Task(req)._returnval
        task = VITask(task_mor, self.esxi_server)
        task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])

        if task.get_state() == task.STATE_ERROR:
            raise Exception("Error creating vm: %s" % task.get_error_message())
Exemplo n.º 45
0
    backing.FileName = "[%s]" % datastore_name
    backing.DiskMode = "persistent"
    backing.Split = False
    backing.WriteThrough = False
    backing.ThinProvisioned = False
    backing.EagerlyScrub = False
    hd.Backing = backing

    disk_spec.Device = hd

    spec.DeviceChange = [disk_spec]
    request.Spec = spec

    task = server._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, server)

    # Wait for task to finish
    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])

    if status == vi_task.STATE_ERROR:
        msg = "ERROR CONFIGURING VM: " + vi_task.get_error_message()
        module.fail_json(msg=msg)

    module.exit_json(changed=True, ansible_facts=facts)


# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
Exemplo n.º 46
0
def esx_vm_configure(config_json):
    """Apply the configuration described by config_json to an existing VM.

    Issues several independent ReconfigVM_Task calls, reconnecting to the
    ESX host between each one: vApp properties, memory, CPU, optional extra
    disks (config_json['hw_disk_gb']) and extra network adapters
    (config_json['hw_vmnet']['adapter']).  Progress and errors are printed
    to stdout.
    """
    config = config_create(config_json)
    properties = []

    esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
    vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

    # --- vApp property changes -------------------------------------------
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)

    spec = request.new_spec()
    vappconfig = spec.new_vAppConfig()

    # config maps an operation name to a list of property dicts; each dict
    # key k is applied through the matching set_element_<k> setter.
    for operation, items in config.items():
        for item in items:
            prop = vappconfig.new_property()
            prop.set_element_operation(operation)
            info = prop.new_info()
            for k, v in item.items():
                method = getattr(info, "set_element_" + k)
                method(v)
            prop.set_element_info(info)
            properties.append(prop)

    vappconfig.set_element_property(properties)
    spec.set_element_vAppConfig(vappconfig)

    request.set_element_spec(spec)
    task = esx._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, esx)

    # NOTE(review): status is not checked for this step (unlike the later
    # steps) — errors in the vApp property change are silently ignored.
    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    esx.disconnect()

    # --- memory ----------------------------------------------------------
    esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
    vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

    # NOTE(review): this reuses the request object built for the previous
    # connection (its _this still references the MOR fetched on the old
    # session) instead of building a fresh ReconfigVM_TaskRequestMsg —
    # confirm this is intended.
    spec = request.new_spec()
    spec.set_element_memoryMB(config_json['hw_mem_mb'])

    request.set_element_spec(spec)
    task = esx._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, esx)

    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    esx.disconnect()

    # --- CPU count / cores per socket ------------------------------------
    esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
    vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)

    spec = request.new_spec()
    spec.set_element_numCoresPerSocket(config_json['hw_vcpu'])
    spec.set_element_numCPUs(config_json['hw_vcpu'])

    request.set_element_spec(spec)
    task = esx._proxy.ReconfigVM_Task(request)._returnval
    vi_task = VITask(task, esx)

    status = vi_task.wait_for_state(
        [vi_task.STATE_SUCCESS, vi_task.STATE_ERROR])
    if status == vi_task.STATE_ERROR:
        print ('ERROR: %s' % vi_task.get_error_message())
    else:
        print ('vApp config successful.')
    esx.disconnect()

    # iterate over disk dictionary and add any disks found
    # to the vm configuration - the dict disk number starts with 1, not 0
    # as the disk with number 0 is already inherited from the template
    if 'hw_disk_gb' in config_json:
        for disk in config_json['hw_disk_gb']:
            esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
            vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

            request = VI.ReconfigVM_TaskRequestMsg()
            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)

            spec = request.new_spec()

            dc = spec.new_deviceChange()
            dc.Operation = "add"
            dc.FileOperation = "create"

            # Key -100 / ControllerKey 1000: presumably a placeholder key
            # for the to-be-created device and the VM's first SCSI
            # controller — confirm against the vSphere API docs.
            hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
            hd.Key = -100
            hd.UnitNumber = int(disk)
            hd.CapacityInKB = config_json['hw_disk_gb'][disk] * 1024 * 1024
            hd.ControllerKey = 1000

            backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
                "backing").pyclass()
            backing.FileName = "%s" % vm.get_property('path').split()[0]
            backing.DiskMode = "persistent"
            backing.Split = False
            backing.WriteThrough = False
            backing.ThinProvisioned = False
            backing.EagerlyScrub = False
            hd.Backing = backing

            dc.Device = hd

            spec.DeviceChange = [dc]
            request.Spec = spec

            # NOTE(review): set_element_spec duplicates the request.Spec
            # assignment just above — one of the two looks redundant.
            request.set_element_spec(spec)
            task = esx._proxy.ReconfigVM_Task(request)._returnval
            vi_task = VITask(task, esx)

            # Wait for task to finish
            status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                             vi_task.STATE_ERROR])
            if status == vi_task.STATE_ERROR:
                print ('ERROR: %s' % vi_task.get_error_message())
            else:
                print ('Disk config successful.')
            esx.disconnect()

    # iterate over network adapter dictionary and add any adapters found
    # to the vm configuration
    for adapter in config_json['hw_vmnet']['adapter']:
        esx = esx_connect(ESX_HOST, ESX_USER, ESX_PASS)
        vm = esx_vm_get(esx, config_json['vapp_net_hostname'])

        request = VI.ReconfigVM_TaskRequestMsg()
        _this = request.new__this(vm._mor)
        _this.set_attribute_type(vm._mor.get_attribute_type())
        request.set_element__this(_this)

        # Add a vmxnet3 adapter backed by the network named in the config.
        spec = request.new_spec()
        dev_change = spec.new_deviceChange()
        dev_change.set_element_operation('add')
        nic_ctlr = VI.ns0.VirtualVmxnet3_Def('nic_ctlr').pyclass()
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
            'nic_backing').pyclass()
        nic_backing.set_element_deviceName(
            config_json['hw_vmnet']['adapter'][adapter]['label'])
        nic_ctlr.set_element_addressType('generated')
        nic_ctlr.set_element_backing(nic_backing)
        nic_ctlr.set_element_key(4)
        dev_change.set_element_device(nic_ctlr)

        spec.set_element_deviceChange([dev_change])
        request.set_element_spec(spec)
        ret = esx._proxy.ReconfigVM_Task(request)._returnval

        # Wait for the task to finish
        vi_task = VITask(ret, esx)

        status = vi_task.wait_for_state([vi_task.STATE_SUCCESS,
                                         vi_task.STATE_ERROR])
        if status == vi_task.STATE_ERROR:
            print ('ERROR: %s' % vi_task.get_error_message())
        else:
            print ('Network adapter config successful.')
        esx.disconnect()