def stoplxc(host):
    """ Stop lxc in proxmox using given hostresource configuration """
    # check if container is running and ask user to stop it
    if not exists(host):
        raise ValueError("Host template is missing. Please create host template")
    container = Container.getContainer(HOST_CONTAINER)
    hostresource = container.loadResource(host)

    # get proxmox user and hypervisor
    userresource = proxmoxutil.listuser()
    if userresource is None:
        raise ValueError("No proxmox user found! Please use proxmoxutil command to update user credentials")
    user = userresource.properties[PROPERTIES_USER]
    password = userresource.properties[PROPERTIES_PASSWORD]
    authrealm = userresource.properties[PROPERTIES_AUTHREALM]
    puser = user + '@' + authrealm

    primary = proxmoxutil.listprimary()
    if primary is None:
        raise ValueError("Primary proxmox hypervisor not found! Please use proxmoxutil command to update primary hypervisor")
    hypervisor = primary.properties[PROPERTIES_HYPERVISOR]

    print("Authenticating " + puser + " on " + hypervisor)
    proxmox = ProxmoxAPI(hypervisor, user=puser, password=password, verify_ssl=False)
    node = proxmox.nodes(hostresource.properties[HYPERVISOR])
    vmid = int(hostresource.properties[HOSTID])

    print("Stopping container")
    node.lxc(vmid).status.stop.post()
    time.sleep(30)
    print("Stopped container")
def proxmox_pci_switcher(name, config=False):
    """Switch a virtual machine on so it can use a PCI resource such as a GPU."""
    # Only fall back to a per-OS default path when no config file was given.
    if not config:
        if os.name == 'nt':
            config = '~\\AppData\\Local\\proxmox-pci-switcher\\config.yaml'
        else:
            config = '~/.config/proxmox-pci-switcher/config.yaml'

    with open(os.path.expanduser(config)) as file:
        proxmox_config = yaml.load(file, Loader=yaml.FullLoader)

    proxmox = ProxmoxAPI(proxmox_config['proxmox']['host'],
                         user=proxmox_config['proxmox']['user'],
                         password=proxmox_config['proxmox']['password'],
                         verify_ssl=proxmox_config['proxmox']['verify_ssl'])

    # use first node
    node = proxmox.nodes.get()[0]

    target = False
    for t in proxmox_config['targets']:
        if name == t['name']:
            target = t
            break

    if target:
        if proxmox.nodes(node['node']).qemu(target['vmid']).status('current').get()['status'] == "stopped":
            print(f"power on vm '{name}', see you later!")
            proxmox.nodes(node['node']).qemu(target['vmid']).status('start').post()
        else:
            print(f"target vm '{name}' is running.")
    else:
        print(f"vm '{name}' not found in '{config}' file.")
def action(request, id_machine, action, type_machine):
    """
    :param id_machine: id of the desired machine (int)
    :param action: start, stop or shutdown
    :param type_machine: 'lxc' or 'qemu'
    :return:
    """
    node = credentials.proxmox.server1.node
    # type_machine = 'lxc'
    proxmox = ProxmoxAPI(credentials.proxmox.server1.url_proxmox,
                         user=credentials.proxmox.server1.user,
                         password=credentials.proxmox.server1.password,
                         verify_ssl=False)

    if type_machine == 'lxc':
        proxmox.nodes(node).lxc(id_machine).status(action).post()
    elif type_machine == 'qemu':
        proxmox.nodes(node).qemu(id_machine).status(action).post()
    else:
        print("incorrect value")

    if action == "stop":
        sentence = 'the hard stop'
    elif action == "start":
        sentence = "the start-up"
    elif action == "shutdown":
        sentence = "the graceful shutdown"
class Vmbr:
    """ Vmbr class defining a Proxmox vmbr """

    def __init__(self, hostname, user, token_name, token_value):
        self.proxmox = ProxmoxAPI(
            hostname,
            user=user,
            token_name=token_name,
            token_value=token_value,
            verify_ssl=False,
        )

    def create_vmbr(self, number):
        for i in self.proxmox.nodes.get():
            self.proxmox.nodes(i["node"]).network.create(
                iface="vmbr" + str(number), type="bridge"
            )
            try:
                self.proxmox.nodes(i["node"]).network.put()
            except ResourceException as e:
                print(str(e) + " ===> Proceeding without reloading the configuration, you will have to reboot your nodes")
            print("Vmbr created on " + i["node"])
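
# Illustrative use of the Vmbr class above: a minimal sketch, assuming an API
# token with network privileges. The hostname, user, and token values below are
# placeholders, not values from the original snippet.
vmbr = Vmbr(
    "pve.example.com",          # hypothetical Proxmox host
    user="root@pam",
    token_name="automation",    # hypothetical token name
    token_value="00000000-0000-0000-0000-000000000000",
)
# Creates a "vmbr42" bridge on every node in the cluster.
vmbr.create_vmbr(42)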
def get_openvz_hosts(ct_node):
    isc = ProxmoxAPI(host=ct_node,
                     port=app.config["PROXMOX_PORT"],
                     user=app.config["PROXMOX_USERNAME"] + '@' + app.config["PROXMOX_REALM"],
                     password=app.config["PROXMOX_PASSWORD"],
                     verify_ssl=False)
    openvz_host_details = {}
    try:
        nodes = []
        for node in isc.nodes.get():
            for vm in isc.nodes(node['node']).openvz.get():
                pve_iface = []
                for vmcfg in isc.nodes(node['node']).openvz(vm['vmid']).config.get():
                    if 'net' in vmcfg:
                        pve_iface_name = vmcfg
                        pve_iface_details = isc.nodes(node['node']).openvz(vm['vmid']).config.get()[pve_iface_name]
                        pve_iface.append({
                            "pve_iface_name": pve_iface_name,
                            "pve_iface_details": pve_iface_details
                        })
                nodes.append({
                    "node": node['node'],
                    "vmid": vm['vmid'],
                    "type": vm['type'],
                    "name": vm['name'],
                    "status": vm['status'],
                    "network_info": pve_iface})
        openvz_host_details.update({"status": "Host details", "vms": nodes})
        return openvz_host_details
    except:
        pass
class HVInfo():
    def __init__(self, host, password):
        self.proxmox = ProxmoxAPI(host, user='******', password=password, verify_ssl=False)

    def getUsers(self):
        """ Get the list of users """
        result = self.proxmox.access.users.get()
        return result

    def ACL(self):
        result = self.proxmox.access.acl.get()
        return result

    def getNodes(self):
        result = self.proxmox.nodes.get()
        return result

    def getKVM(self, node):
        result = self.proxmox.nodes(node).qemu.get()
        return result

    def getOpenVZ(self, node):
        result = self.proxmox.nodes(node).openvz.get()
        return result
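
# A minimal usage sketch for HVInfo, assuming a reachable Proxmox host; the
# hostname and password are placeholders, not part of the original snippet.
info = HVInfo("pve.example.com", "secret")
for node in info.getNodes():
    # Print each node name with the vmids of its KVM guests.
    print(node["node"], "KVM guests:", [vm["vmid"] for vm in info.getKVM(node["node"])])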
class ProxmoxKvmChaotic(Chaotic):
    def __init__(self) -> None:
        super().__init__()
        log.info(f"Proxmox host: {PROXMOX_API_HOST}")
        log.info(f"Proxmox user: {PROXMOX_API_USER}")
        self.pve = ProxmoxAPI(host=PROXMOX_API_HOST,
                              user=PROXMOX_API_USER,
                              password=PROXMOX_API_PASSWORD,
                              verify_ssl=PROXMOX_API_VERIFY_SSL)

    def action(self) -> None:
        vms = self.pve.cluster.resources.get(type='vm')
        denylist = self.configs.get('denylist') or []
        vms = [
            vm for vm in vms
            if vm['status'] == "running" and vm['name'] not in denylist
        ]
        if vms:
            vm = random.choice(vms)
            log.info(f"Chose VM ID={vm['vmid']}, name={vm['name']} on node={vm['node']}")

            min_uptime = self.configs.get('min_uptime')
            if min_uptime is not None:
                current = self.pve.nodes(vm['node']).qemu(vm['vmid']).status.current.get()
                required_uptime = min_uptime * 60
                if current['uptime'] < required_uptime:
                    log.info(
                        f"VM {vm['name']} uptime {current['uptime'] / 60:.2f} min is lower than the required {min_uptime} min, skipping"
                    )
                    log.info("done")
                    return

            if not self.dry_run:
                log.info(f"Stopping VM {vm['name']}")
                self.pve.nodes(vm['node']).qemu(vm['vmid']).status.shutdown.post(forceStop=1)

                wait_before_restart = int(self.configs.get('wait_before_restart', 60))
                log.info(f"Sleeping for {wait_before_restart} seconds")
                time.sleep(wait_before_restart)

                log.info(f"Starting VM {vm['name']}")
                self.pve.nodes(vm['node']).qemu(vm['vmid']).status.start.post()
        else:
            log.info("No VMs found")

        log.info("done")
class Proxmox(App):
    """
    Skeleton example app to build other apps off of

    Args:
        app_name (str): Name of the app
        device (list[str]): List of associated device names
        context (dict): Information about the context in which the App is operating
    """

    def __init__(self, app_name, device, context):
        App.__init__(self, app_name, device, context)  # Required to call superconstructor
        self.proxmox = ProxmoxAPI(self.host,
                                  user=self.device_fields["username"],
                                  password=self.device.get_encrypted_field("password"),
                                  verify_ssl=False)

    @action
    def get_all_nodes(self):
        return self.proxmox.nodes.get()

    @action
    def get_all_vms(self):
        nodes_vms = []
        for node in self.proxmox.nodes.get():
            node['vms'] = []
            for vm in self.proxmox.nodes(node['node']).openvz.get():
                node['vms'].append(vm)
            nodes_vms.append(node)
        return nodes_vms

    @action
    def get_all_vms_for_node(self, node_name):
        for node in self.proxmox.nodes.get():
def find_vm(self, label):
    """Find a VM in the Proxmox cluster and return its node and vm proxy
    objects for extraction of additional data by other methods.

    @param label: the label of the VM to be compared to the VM's name in Proxmox.
    @raise CuckooMachineError: if the VM cannot be found."""
    proxmox = ProxmoxAPI(self.options.proxmox.hostname,
                         user=self.options.proxmox.username,
                         password=self.options.proxmox.password,
                         verify_ssl=False)

    # /cluster/resources[type=vm] will give us all VMs no matter which node
    # they reside on
    try:
        vms = proxmox.cluster.resources.get(type="vm")
    except ResourceException as e:
        raise CuckooMachineError("Error enumerating VMs: %s" % e)

    for vm in vms:
        if vm["name"] == label:
            # dynamically address
            # /nodes/<node>/{qemu,lxc,openvz,...}/<vmid> to get handle on VM
            node = proxmox.nodes(vm["node"])
            hv = node.__getattr__(vm["type"])
            vm = hv.__getattr__(str(vm["vmid"]))

            # remember various request proxies for subsequent actions
            return vm, node

    raise CuckooMachineError("Not found")
def ProxmoxMain(request):
    if not request.user.is_authenticated():
        return redirect('/Login?next=%s' % request.path)

    proxmoxIpAddress = settings.PROXMOX_SERVER_IP_ADDRESS
    proxmoxUsername = settings.PROXMOX_USER
    proxmoxPassword = settings.PROXMOX_PASSWORD
    proxmoxVerifySsl = settings.PROXMOX_VERIFY_SSL
    proxmox = ProxmoxAPI(proxmoxIpAddress, user=proxmoxUsername,
                         password=proxmoxPassword, verify_ssl=proxmoxVerifySsl)

    command = request.GET.get('command', 'none')
    currentNode = request.GET.get('node', 'none')
    currentVz = request.GET.get('openvz', 'none')
    currentVm = request.GET.get('qemu', 'none')

    if command != 'none':
        return runcommand(request, proxmox)
    elif currentVz != 'none':
        vms = getVmDict(proxmox)
        tasks = getAllServerTasks(proxmox)
        return render(request, 'OctaHomeProxmox/CT.html', {
            'links': getSideBar(request, proxmox),
            'serverInfo': addWizardVeriables(request, proxmox),
            'node': currentNode,
            'Vz': vms[currentNode][currentVz],
            'StorageDevices': getStorageDetails(proxmox),
            'ServerStatuses': getServerStatuses(proxmox),
            'tasks': tasks})
    elif currentVm != 'none':
        raise Http404
    elif currentNode != 'none':
        tasks = proxmox.nodes(currentNode).get('tasks')
        return render(request, 'OctaHomeProxmox/Node.html', {
            'links': getSideBar(request, proxmox),
            'serverInfo': addWizardVeriables(request, proxmox),
            'Node': currentNode,
            'StorageDevices': getStorageDetails(proxmox),
            'ServerStatuses': getServerStatuses(proxmox),
            'tasks': tasks})
    else:
        tasks = getAllServerTasks(proxmox)
        return render(request, 'OctaHomeProxmox/AllNodes.html', {
            'links': getSideBar(request, proxmox),
            'serverInfo': addWizardVeriables(request, proxmox),
            'StorageDevices': getStorageDetails(proxmox),
            'ServerStatuses': getServerStatuses(proxmox),
            'tasks': tasks})
    raise Http404
def check(pve: ProxmoxAPI = None, age: int = 7) -> Iterable[NagiosResult]:
    if not pve:
        # we need to make the argument itself optional for argh
        raise RuntimeError('pve parameter missing')

    cutoff = int((datetime.now() - timedelta(days=age)).timestamp())

    for node in pve.nodes.get():
        last_failed = 0
        last_success = 0
        node_fqdn = '.'.join(
            (node['node'], pve.nodes(node['node']).dns.get()['search']))

        for task in pve.nodes(node['node']).tasks.get(typefilter='vzdump'):
            if task['endtime'] < cutoff:
                continue
            if task['status'] == 'OK':
                last_success = max(last_success, task['endtime'])
            else:
                last_failed = max(last_failed, task['endtime'])

        if last_failed > last_success:
            yield NagiosResult(
                ResultCode.CRITICAL,
                '',  # unused at this stage
                'Last backup of node {} at {} had errors'.format(
                    node_fqdn,
                    datetime.fromtimestamp(last_failed),
                ),
            )
        elif not last_success:
            yield NagiosResult(
                ResultCode.WARNING,
                '',  # unused at this stage
                'Last backup of node {} older than cutoff'.format(node_fqdn),
            )
        else:
            yield NagiosResult(
                ResultCode.OK,
                '',  # unused at this stage
                'Last backup of node {} at {} was successful'.format(
                    node_fqdn,
                    datetime.fromtimestamp(last_success),
                ),
            )
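
# Sketch of how the vzdump check above could be driven. It assumes NagiosResult
# and ResultCode come from the same module as check(); host and credentials are
# placeholders, not values from the original snippet.
pve = ProxmoxAPI("pve.example.com", user="monitor@pve", password="secret", verify_ssl=False)
for result in check(pve, age=7):
    # Each yielded NagiosResult carries the code and a human-readable detail line.
    print(result)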
def __init__(self, *args, **kwargs):
    super(CD_DVD, self).__init__(*args, **kwargs)
    isos = []
    proxmox = ProxmoxAPI(secrets.PROXMOX_HOST, user=secrets.PROXMOX_USER,
                         password=secrets.PROXMOX_PASS, verify_ssl=False)
    for item in proxmox.nodes('proxmox01').storage('NFS-ISOs').content.get():
        isos.append(item['volid'])
    self.fields['iso'] = forms.ChoiceField(choices=[(iso, iso) for iso in isos])
def get_kvm_hosts(ct_node):
    isc = ProxmoxAPI(host=ct_node,
                     port=app.config["PROXMOX_PORT"],
                     user=app.config["PROXMOX_USERNAME"] + '@' + app.config["PROXMOX_REALM"],
                     password=app.config["PROXMOX_PASSWORD"],
                     verify_ssl=False)
    for node in isc.nodes.get():
        for vm in isc.nodes(node['node']).qemu.get():
            print("{0}. {1} => {2}".format(vm['vmid'], vm['name'], vm['status']))
    return None
def get_vms_slow(host, *args, **kwargs):
    """Get and returns a list of all VMs on the PVE cluster, including disks.

    DON'T USE! This is for reference only. It's slow as hell.
    About 10 times slower.
    """
    pve = ProxmoxAPI(host, *args, **kwargs)
    vms = []
    for node in pve.nodes.get():
        storage = pve.nodes(node['node']).get('storage', content='images')
        for vm in pve.nodes(node['node']).get('qemu'):
            vmdisks = []
            for ds in storage:
                vmdisks.extend(
                    pve.nodes(node['node']).storage(ds['storage']).get(
                        'content', vmid=vm['vmid']))
            vm['disks'] = vmdisks
            vms.append(vm)
    return vms
def check(pve: ProxmoxAPI = None):
    if not pve:
        # we need to make the argument itself optional for argh
        raise RuntimeError('pve parameter missing')

    for node in pve.nodes.get():
        node_fqdn = '.'.join(
            (node['node'], pve.nodes(node['node']).dns.get()['search']))
        for vm in pve.nodes(node['node']).qemu.get():
            vm_config = pve.nodes(node['node']).qemu(vm['vmid']).config.get()
            onboot = vm_config.get('onboot', 0)
            if (vm['status'] == 'running') ^ (onboot == 1):
                yield NagiosResult(
                    code=ResultCode.WARNING,
                    summary='',  # unused at this stage
                    details='{} on {} is {} but autostart={}'.format(
                        vm['name'],
                        node_fqdn,
                        vm['status'],
                        onboot,
                    ))
def get_storage(host, *args, **kwargs):
    """Get and returns a list of all storage active on the PVE cluster."""
    pve = ProxmoxAPI(host, *args, **kwargs)
    storage = []
    seen = set()
    for node in pve.nodes.get():
        for ds in pve.nodes(node['node']).get('storage'):
            if ds['shared'] != 1:
                ds['node'] = node['node']
            elif ds['storage'] in seen:
                continue
            else:
                seen.add(ds['storage'])
            ds['contents'] = pve.nodes(node['node']).storage(
                ds['storage']).get('content')
            storage.append(ds)
    return storage
def get_nodes(host, *args, **kwargs):
    """Get and returns a list of all nodes on the PVE cluster."""
    pve = ProxmoxAPI(host, *args, **kwargs)
    nodes = pve.nodes.get()
    # Add to this list to get more info on node.
    properties = ['network', 'services']
    for node in nodes:
        for p in properties:
            node[p] = pve.nodes(node['node']).get(p)
    return nodes
def rebuildlxc(host):
    """ Creates lxc in proxmox using given hostresource configuration """
    # check if container is running and ask user to stop it
    if not exists(host):
        raise ValueError("Host template is missing. Please create host template")
    container = Container.getContainer(HOST_CONTAINER)
    hostresource = container.loadResource(host)

    # get proxmox user and hypervisor
    userresource = proxmoxutil.listuser()
    if userresource is None:
        raise ValueError("No proxmox user found! Please use proxmoxutil command to update user credentials")
    user = userresource.properties[PROPERTIES_USER]
    password = userresource.properties[PROPERTIES_PASSWORD]
    authrealm = userresource.properties[PROPERTIES_AUTHREALM]
    puser = user + '@' + authrealm

    primary = proxmoxutil.listprimary()
    if primary is None:
        raise ValueError("Primary proxmox hypervisor not found! Please use proxmoxutil command to update primary hypervisor")
    hypervisor = primary.properties[PROPERTIES_HYPERVISOR]

    print("Authenticating " + puser + " on " + hypervisor)
    proxmox = ProxmoxAPI(hypervisor, user=puser, password=password, verify_ssl=False)
    node = proxmox.nodes(hostresource.properties[HYPERVISOR])

    hostname = hostresource.properties[HOSTNAME]
    vmid = int(hostresource.properties[HOSTID])
    memory = int(hostresource.properties[PROPERTIES_MEMORY])
    swap = int(hostresource.properties[PROPERTIES_SWAP])
    interfaces = hostresource.properties[INTERFACES]

    i = 0
    netconfig = dict()
    for interface in interfaces:
        print("Configuring %s" % interface)
        netconfig["net" + str(i)] = hostresource.properties[interface]
        i = i + 1

    print("Reconfiguring LXC with the following parameters:")
    print("Vmid: %d" % vmid)
    print("Memory: %d" % memory)
    print("Swap: %d" % swap)
    node.lxc(vmid).config.put(memory=memory, swap=swap, **netconfig)
    print("Reconfiguring LXC.....")
    time.sleep(30)
def main():
    module = AnsibleModule(
        argument_spec=dict(args=dict(type='str', default=None),
                           api_host=dict(required=True),
                           api_user=dict(required=True),
                           api_password=dict(no_log=True),
                           validate_certs=dict(type='bool', default='no'),
                           node=dict(type='str', default='no'),
                           vm_id=dict(type='int', required=True),
                           vm_ip_type=dict(type='str', default='ipv4'),
                           vm_interface=dict(type='str', default='eth0')))

    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    vm_id = module.params['vm_id']
    vm_ip_type = module.params['vm_ip_type']
    vm_interface = module.params['vm_interface']

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
        global VZ_TYPE
        global PVE_MAJOR_VERSION
        PVE_MAJOR_VERSION = 3 if proxmox_version(proxmox) < LooseVersion('4.0') else 4
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    try:
        vm_ip = proxmox.nodes(node).qemu(vm_id).agent('network-get-interfaces').get()
    except Exception as e:
        module.fail_json(msg='Getting IP for VM with vmid %s failed with exception: %s' % (vm_id, e))

    vm_ip_info = [i for i in vm_ip['result'] if i['name'] == vm_interface]
    vm_ip4_addr = [
        i for i in vm_ip_info[0]['ip-addresses']
        if i['ip-address-type'] == vm_ip_type
    ][0]['ip-address']
    response = vm_ip4_addr
    module.exit_json(changed=False, ip=response)
def main():
    module = AnsibleModule(
        argument_spec=dict(args=dict(type='str', default=None),
                           api_host=dict(required=True),
                           api_user=dict(required=True),
                           api_password=dict(no_log=True),
                           validate_certs=dict(type='bool', default='no'),
                           node=dict(type='str', default='no')))

    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    validate_certs = module.params['validate_certs']
    node = module.params['node']

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
        global VZ_TYPE
        global PVE_MAJOR_VERSION
        PVE_MAJOR_VERSION = 3 if proxmox_version(proxmox) < LooseVersion('4.0') else 4
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    try:
        node = proxmox.nodes(node)
        vms = node.qemu.get()
    except Exception as e:
        module.fail_json(msg='Getting information for VMs failed with exception: %s' % e)

    vm_list = []
    vm_dict = {}
    for vm in vms:
        try:
            vm_dict[vm['name']] = node.qemu(vm['vmid']).config.get()
        except Exception as e:
            module.fail_json(
                msg='Getting information for VM with vmid = %s failed with exception: %s'
                % (vm['vmid'], e))
        vm_dict[vm['name']]['vmid'] = vm['vmid']

    response = vm_dict
    module.exit_json(changed=False, vm_configs=response)
def get_vms(host, *args, **kwargs):
    """Get and returns a list of all VMs on the PVE cluster, including disks."""
    pve = ProxmoxAPI(host, *args, **kwargs)
    vms = []
    all_disks = []
    for node in pve.nodes.get():
        vms.extend(pve.nodes(node['node']).get('qemu'))
        # Loop through storage with content = images only.
        for ds in pve.nodes(node['node']).get('storage', content='images'):
            all_disks.extend(
                pve.nodes(node['node']).storage(ds['storage']).get('content'))
    all_disks = dedup(all_disks, 'volid')
    for vm in vms:
        vmdisks = []
        for disk in all_disks:
            if int(disk['vmid']) == int(vm['vmid']):
                vmdisks.append(disk)
        vm['disks'] = vmdisks
    return vms
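
# Example call for get_vms(); the host and credentials are placeholders. Extra
# keyword arguments are passed straight through to ProxmoxAPI, and each returned
# VM dict carries a 'disks' list gathered from the image storages of its node.
for vm in get_vms("pve.example.com", user="root@pam", password="secret", verify_ssl=False):
    print(vm["vmid"], vm["name"], len(vm["disks"]), "disk volume(s)")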
def create_vm(request):
    vm_form = VM_Form(data=request.POST or None)
    drive_form = CD_DVD(data=request.POST or None)
    disk_form = Disk(data=request.POST or None)
    cpu_form = CPU(data=request.POST or None)
    net_form = Network(data=request.POST or None)

    if request.method == 'POST':
        if vm_form.is_valid() and drive_form.is_valid() and disk_form.is_valid() and cpu_form.is_valid() and net_form.is_valid():
            if '_request' in request.POST:
                # Request VM
                use = User.objects.get_or_create(username='******')
                request_vm(vm_form, drive_form, disk_form, cpu_form, net_form, use[0])
                return redirect('/')
            if check_limits(vm_form.cleaned_data['memory'], cpu_form.cleaned_data['cores'], disk_form.cleaned_data['size']):
                return render(request, 'create.html', {
                    'vm_form': vm_form, 'drive_form': drive_form, 'disk_form': disk_form,
                    'cpu_form': cpu_form, 'net_form': net_form, 'request_vm': True})

            proxmox = ProxmoxAPI(secrets.PROXMOX_HOST, user=secrets.PROXMOX_USER,
                                 password=secrets.PROXMOX_PASS, verify_ssl=False)
            node = proxmox.nodes(vm_form.cleaned_data['node'])
            vm_id = int(proxmox.cluster.nextid.get())
            testdata = node.qemu.create(
                vmid=vm_id,
                name=vm_form.cleaned_data['name'],
                ostype=vm_form.cleaned_data['ostype'],
                ide2=drive_form.cleaned_data['iso'] + ',media=cdrom',
                ide0='ceph_pool:' + str(disk_form.cleaned_data['size']) + ',format=' + disk_form.cleaned_data['disk_format'],
                sockets=1,
                cores=cpu_form.cleaned_data['cores'],
                numa=0,
                pool=secrets.PROXMOX_POOL,
                memory=vm_form.cleaned_data['memory'],
                net0=net_form.cleaned_data['model'] + ',bridge=' + net_form.cleaned_data['bridge'])

            # Testing
            use = User.objects.get_or_create(username='******')
            vm = VM(user=use[0], vmid=vm_id, name=vm_form.cleaned_data['name'], nodename=vm_form.cleaned_data['node'])
            vm.save()
            return redirect('/manage/')

    return render(request, 'create.html', {
        'vm_form': vm_form, 'drive_form': drive_form, 'disk_form': disk_form,
        'cpu_form': cpu_form, 'net_form': net_form})
def connect(server_url: str,
            username: str,
            *,
            password: Optional[str] = None,
            token_name: Optional[str] = None,
            token_value: Optional[str] = None,
            verify_ssl: bool = False,
            use_ssh: bool = False):
    kwargs = {
        "host": server_url,
        "user": username,
        "password": password,
        "backend": "ssh_paramiko" if use_ssh else "https"
    }
    if token_name and token_value and not use_ssh:
        kwargs["token_name"] = token_name
        kwargs["token_value"] = token_value
    if not use_ssh:
        kwargs['verify_ssl'] = verify_ssl
    api = ProxmoxAPI(**kwargs)

    # check if API is working.
    try:
        nodes = api.nodes().get()
        if not nodes:
            raise Exception(
                f"Failed to connect to Proxmox server '{username}@{server_url}' or the Proxmox cluster is empty (no nodes found), verify credentials"
            )
    except proxmoxer.backends.https.AuthenticationError:
        raise Exception(
            f"Failed to connect to Proxmox server '{username}@{server_url}', verify credentials (authentication error)"
        )
    return api
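
# Two hedged examples of calling connect() above: one with a password and one
# with an API token. Host, user, and token values are placeholders, not values
# from the original snippet.
api = connect("pve.example.com", "root@pam", password="secret")
api_token = connect("pve.example.com", "automation@pve",
                    token_name="ci",  # hypothetical token name
                    token_value="00000000-0000-0000-0000-000000000000")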
def delete(cfg: Config) -> None:
    proxmox: ProxmoxAPI
    host = cfg.proxmox_hostname
    user = cfg.proxmox_username
    verify_ssl = cfg.proxmox_verify_ssl
    node_name = cfg.proxmox_node_name
    vm_id = cfg.vm_id

    if "proxmox_token_name" in dir(cfg) and "proxmox_token_value" in dir(cfg):
        print("Using API token")
        token_name = cfg.proxmox_token_name
        token_value = cfg.proxmox_token_value
        proxmox = ProxmoxAPI(host, user=user, token_name=token_name,
                             token_value=token_value, verify_ssl=verify_ssl)
    else:
        print("Using username and password")
        password = cfg.proxmox_password
        proxmox = ProxmoxAPI(host, user=user, password=password, verify_ssl=verify_ssl)

    try:
        vm = proxmox.nodes(node_name).qemu(vm_id)
        vm_config = vm.config().get()
        vm.delete()
        print(f"Successfully deleted VM template {vm_id}")
        # if "template" in vm_config and vm_config['template'] == 1:
        # else:
        #     raise ResourceException(f"Provided VM id({vm_id}) is not a template")
    except ResourceException as err:
        print("Nothing to delete")
# print node
# for vm in node:
#     print "{0}. {1} => {2}".format(vm['vmid'], vm['name'], vm['status'])

# node.qemu.create(template='local:vztmpl/debian-6-turnkey-core_12.0-1_i386.tar.gz',
#                  hostname='turnkey',
#                  storage='local',
#                  memory=512,
#                  swap=512,
#                  cpus=1,
#                  disk=4,
#                  password='******',
#                  ip_address='10.0.0.202')

# for node in proxmox.nodes.get():
#     print node

target_node = proxmox.nodes(node_name)
target_qemu = target_node.qemu(template_id)
response = target_qemu.clone.create(newid=vm_id,
                                    full=1,
                                    name=new_hostname,
                                    format='qcow2',
                                    storage='local')
print(response)

status = target_node.tasks(response).status.get()
while status['status'] == 'running':
    status = target_node.tasks(response).status.get()
    print('Creating VM: ' + status['status'])
    time.sleep(3)

if status['exitstatus'] == 'OK':
class OVZ():
    def __init__(self):
        self.proxmox = None
        self.node = None
        self.password = None
        self.log = []

    def connect(self, hostname, password, nodename):
        self.proxmox = ProxmoxAPI(hostname, user='******', password=password, verify_ssl=False)
        self.node = self.proxmox.nodes(nodename)
        self.password = pwgen()

    def create(self, vmid=100, template='local:vztmpl/centos-6-x86_64.tar.gz',
               hostname='newvm', mem=512, hdd=5, cpus=1, iplist=[], nameserver='8.8.8.8'):
        if not self.isVMExist(vmid):
            try:
                self.node.openvz.create(vmid=vmid, ostemplate=template, hostname=hostname,
                                        storage='local', memory=mem, swap=0, cpus=cpus,
                                        disk=hdd, password=self.password,
                                        ip_address=iplist, nameserver=nameserver)
            except:
                return False
            # Give the container archive time to unpack
            time.sleep(30)
            self.node.openvz(vmid).config.set(onboot=1)
            return True

    def startvm(self, vmid=100):
        if self.isVMExist(vmid):
            self.node.openvz(vmid).status.start.post()

    def stopvm(self, vmid=100):
        if self.isVMExist(vmid):
            self.node.openvz(vmid).status.stop.post()

    def deletevm(self, vmid=100):
        if self.isVMExist(vmid):
            self.node.openvz(vmid).delete()

    def isVMExist(self, vmid=100):
        result = False
        for item in self.node.openvz.get():
            if item['vmid'] == vmid:
                result = True
        for item in self.node.qemu.get():
            if item['vmid'] == vmid:
                result = True
        return result

    def machines(self):
        result = self.node.openvz.get()
        return result

    def task(self, xml):
        """ Batch mode """
        result = []
        self.log.append('OVZ task begin')
        for item in xml:
            self.log.append(item.tag)
            if item.tag == 'connect':
                self.connect(item.attrib['hostname'], item.attrib['password'], item.attrib['node'])
            elif item.tag == 'create':
                iplist = []
                for ip in item:
                    iplist.append(ip.text)
                # create() returns True on success, so report an error only when it fails.
                if not self.create(vmid=item.attrib['vmid'],
                                   template='local:vztmpl/' + item.attrib['template'],
                                   hostname=item.attrib['hostname'],
                                   mem=item.attrib['mem'],
                                   hdd=item.attrib['hdd'],
                                   cpus=item.attrib['cpu'],
                                   iplist=iplist,
                                   nameserver='46.17.40.200 46.17.46.200'):
                    result.append('<result type="openvz" status="error" description="Creation failed"/>')
                else:
                    time.sleep(10)
                    result.append('<result type="openvz" status="ready" password="******"/>')
            elif item.tag == 'delete':
                self.deletevm(vmid=item.attrib['vmid'])
            elif item.tag == 'start':
                self.startvm(vmid=item.attrib['vmid'])
            elif item.tag == 'wait':
                time.sleep(300)
        self.log.append('OVZ task end')
        return result
def main():
    module = AnsibleModule(
        argument_spec=dict(
            acpi=dict(type='bool', default='yes'), agent=dict(type='bool'),
            args=dict(type='str', default=None), api_host=dict(required=True),
            api_user=dict(required=True), api_password=dict(no_log=True),
            autostart=dict(type='bool', default='no'), balloon=dict(type='int', default=0),
            bios=dict(choices=['seabios', 'ovmf']), boot=dict(type='str', default='cnd'),
            bootdisk=dict(type='str'), clone=dict(type='str', default=None),
            cores=dict(type='int', default=1), cpu=dict(type='str', default='kvm64'),
            cpulimit=dict(type='int'), cpuunits=dict(type='int', default=1000),
            delete=dict(type='str', default=None), description=dict(type='str'),
            digest=dict(type='str'), force=dict(type='bool', default=None),
            format=dict(type='str', default='qcow2',
                        choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk']),
            freeze=dict(type='bool'), full=dict(type='bool', default='yes'),
            hostpci=dict(type='dict'), hotplug=dict(type='str'),
            hugepages=dict(choices=['any', '2', '1024']), ide=dict(type='dict', default=None),
            keyboard=dict(type='str'), kvm=dict(type='bool', default='yes'),
            localtime=dict(type='bool'),
            lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
            machine=dict(type='str'), memory=dict(type='int', default=512),
            migrate_downtime=dict(type='int'), migrate_speed=dict(type='int'),
            name=dict(type='str'), net=dict(type='dict'),
            newid=dict(type='int', default=None), node=dict(),
            numa=dict(type='dict'), numa_enabled=dict(type='bool'),
            onboot=dict(type='bool', default='yes'),
            ostype=dict(default='l26', choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista',
                                                'win7', 'win8', 'l24', 'l26', 'solaris']),
            parallel=dict(type='dict'), pool=dict(type='str'),
            protection=dict(type='bool'), reboot=dict(type='bool'),
            revert=dict(type='str', default=None), sata=dict(type='dict'),
            scsi=dict(type='dict'),
            scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single',
                                 'megasas', 'pvscsi']),
            serial=dict(type='dict'), shares=dict(type='int'),
            skiplock=dict(type='bool'), smbios=dict(type='str'),
            snapname=dict(type='str'), sockets=dict(type='int', default=1),
            startdate=dict(type='str'), startup=dict(),
            state=dict(default='present',
                       choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
            storage=dict(type='str'), tablet=dict(type='bool', default='no'),
            target=dict(type='str'), tdf=dict(type='bool'),
            template=dict(type='bool', default='no'), timeout=dict(type='int', default=30),
            update=dict(type='bool', default='no'), validate_certs=dict(type='bool', default='no'),
            vcpus=dict(type='int', default=None),
            vga=dict(default='std', choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1',
                                             'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
            virtio=dict(type='dict', default=None), vmid=dict(type='int', default=None),
            watchdog=dict(),
        ),
        mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'),
                            ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
        required_one_of=[('name', 'vmid')],
        required_if=[('state', 'present', ['node'])]
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    clone = module.params['clone']
    cpu = module.params['cpu']
    cores = module.params['cores']
    delete = module.params['delete']
    memory = module.params['memory']
    name = module.params['name']
    newid = module.params['newid']
    node = module.params['node']
    revert = module.params['revert']
    sockets = module.params['sockets']
    state = module.params['state']
    timeout = module.params['timeout']
    update = bool(module.params['update'])
    vmid = module.params['vmid']
    validate_certs = module.params['validate_certs']

    # If password not set get it from PROXMOX_PASSWORD env
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError as e:
            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
        global VZ_TYPE
        global PVE_MAJOR_VERSION
        PVE_MAJOR_VERSION = 3 if float(proxmox.version.get()['version']) < 4.0 else 4
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    # If vmid not set get the Next VM id from ProxmoxAPI
    # If vm name is set get the VM id from ProxmoxAPI
    if not vmid:
        if state == 'present' and (not update and not clone) and (not delete and not revert):
            try:
                vmid = get_nextvmid(module, proxmox)
            except Exception as e:
                module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
        else:
            try:
                if not clone:
                    vmid = get_vmid(proxmox, name)[0]
                else:
                    vmid = get_vmid(proxmox, clone)[0]
            except Exception as e:
                if not clone:
                    module.fail_json(msg="VM {0} does not exist in cluster.".format(name))
                else:
                    module.fail_json(msg="VM {0} does not exist in cluster.".format(clone))

    if clone is not None:
        if get_vmid(proxmox, name):
            module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
        if vmid is not None:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
        if not newid:
            try:
                newid = get_nextvmid(module, proxmox)
            except Exception as e:
                module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
        else:
            vm = get_vm(proxmox, newid)
            if vm:
                module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))

    if delete is not None:
        try:
            settings(module, proxmox, vmid, node, name, timeout, delete=delete)
            module.exit_json(changed=True, msg="Settings have been deleted on VM {0} with vmid {1}".format(name, vmid))
        except Exception as e:
            module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
    elif revert is not None:
        try:
            settings(module, proxmox, vmid, node, name, timeout, revert=revert)
            module.exit_json(changed=True, msg="Settings have been reverted on VM {0} with vmid {1}".format(name, vmid))
        except Exception as e:
            module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... '.format(name, vmid) + str(e))

    if state == 'present':
        try:
            if get_vm(proxmox, vmid) and not (update or clone):
                module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
            elif get_vmid(proxmox, name) and not (update or clone):
                module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
            elif not node or not name:
                module.fail_json(msg='node, name is mandatory for creating/updating vm')
            elif not node_check(proxmox, node):
                module.fail_json(msg="node '%s' does not exist in cluster" % node)

            create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update,
                      acpi=module.params['acpi'], agent=module.params['agent'],
                      autostart=module.params['autostart'], balloon=module.params['balloon'],
                      bios=module.params['bios'], boot=module.params['boot'],
                      bootdisk=module.params['bootdisk'], cpulimit=module.params['cpulimit'],
                      cpuunits=module.params['cpuunits'], description=module.params['description'],
                      digest=module.params['digest'], force=module.params['force'],
                      freeze=module.params['freeze'], hostpci=module.params['hostpci'],
                      hotplug=module.params['hotplug'], hugepages=module.params['hugepages'],
                      ide=module.params['ide'], keyboard=module.params['keyboard'],
                      kvm=module.params['kvm'], localtime=module.params['localtime'],
                      lock=module.params['lock'], machine=module.params['machine'],
                      migrate_downtime=module.params['migrate_downtime'],
                      migrate_speed=module.params['migrate_speed'], net=module.params['net'],
                      numa=module.params['numa'], numa_enabled=module.params['numa_enabled'],
                      onboot=module.params['onboot'], ostype=module.params['ostype'],
                      parallel=module.params['parallel'], pool=module.params['pool'],
                      protection=module.params['protection'], reboot=module.params['reboot'],
                      sata=module.params['sata'], scsi=module.params['scsi'],
                      scsihw=module.params['scsihw'], serial=module.params['serial'],
                      shares=module.params['shares'], skiplock=module.params['skiplock'],
                      smbios1=module.params['smbios'], snapname=module.params['snapname'],
                      startdate=module.params['startdate'], startup=module.params['startup'],
                      tablet=module.params['tablet'], target=module.params['target'],
                      tdf=module.params['tdf'], template=module.params['template'],
                      vcpus=module.params['vcpus'], vga=module.params['vga'],
                      virtio=module.params['virtio'], watchdog=module.params['watchdog'])

            if not clone:
                get_vminfo(module, proxmox, node, vmid,
                           ide=module.params['ide'],
                           net=module.params['net'],
                           sata=module.params['sata'],
                           scsi=module.params['scsi'],
                           virtio=module.params['virtio'])
            if update:
                module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid))
            elif clone is not None:
                module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
            else:
                module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
        except Exception as e:
            if update:
                module.fail_json(msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
            elif clone is not None:
                module.fail_json(msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
            else:
                module.fail_json(msg="creation of %s VM %s with vmid %s failed with exception=%s" % (VZ_TYPE, name, vmid, e))

    elif state == 'started':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)
            if start_vm(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
            if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']):
                module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
        except Exception as e:
            module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'restarted':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is not running" % vmid)
            if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']) and start_vm(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
        except Exception as e:
            module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'absent':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
            taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
            while timeout:
                if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                        proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
                    module.exit_json(changed=True, msg="VM %s removed" % vmid)
                timeout -= 1
                if timeout == 0:
                    module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
                time.sleep(1)
        except Exception as e:
            module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'current':
        status = {}
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            current = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status']
            status['status'] = current
            if status:
                module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
        except Exception as e:
            module.fail_json(msg="Unable to get vm {0} with vmid = {1} status: ".format(name, vmid) + str(e))
def main():
    '''
    Example configuration file:

    apiuser: "******"
    apipass: "******"
    cluster:
        - "cluster1"
        - "cluster2"
    '''
    conf_path = os.path.dirname(os.path.abspath(__file__)) + r'/../conf/proxmox.yml'
    try:
        pve_config = yaml.load(open(conf_path, 'r'))
        pve_apiuser = pve_config['apiuser']
        pve_apipass = pve_config['apipass']
        pve_cluster = pve_config['cluster']
    except:
        print('{0} ERROR: Unable to load configuration from {1}'.format(
            str(datetime.now()), conf_path))
        return 1

    parser = argparse.ArgumentParser(description='Backup a VM by name')
    parser.add_argument('vmname', nargs='+', help='VM to Backup')
    parser.add_argument('--target', required=True, help='Backup Location')
    parser.add_argument('--notify', required=True, help='Notification Email Address')
    args = parser.parse_args()

    # Track all VMs that are backed up
    backup_list = set()

    for cluster in pve_cluster:
        try:
            pve = ProxmoxAPI(cluster, user=pve_apiuser, password=pve_apipass, verify_ssl=False)
        except:
            print('{0} ERROR: Unable to access cluster {1}. Skipping!'.format(
                str(datetime.now()), cluster))
            continue

        for node in pve.nodes.get():
            target_exists = False
            # Track all VMs backed up on a node
            backup_list_node = set()
            backup_list_node_vmid = set()

            for target in pve.nodes(node['node']).storage.get():
                if target['storage'] == args.target:
                    target_exists = True

            if target_exists:
                for vm in pve.nodes(node['node']).qemu.get():
                    if vm['name'] in args.vmname:
                        backup_list.add(vm['name'])
                        backup_list_node.add(vm['name'])
                        backup_list_node_vmid.add(vm['vmid'])

                if len(backup_list_node) >= 1:
                    backup_vmname = str()
                    backup_vmid = str()
                    for vmid in backup_list_node_vmid:
                        backup_vmid = backup_vmid + '{0},'.format(vmid)
                    for name in backup_list_node:
                        backup_vmname = backup_vmname + ' {0}'.format(name)
                    try:
                        print('{0} INFO: Starting backup(s) for {1} on node {2}'.format(
                            str(datetime.now()), backup_vmname.strip().upper(), node['node']))
                        pve.nodes(node['node']).vzdump.create(
                            vmid=backup_vmid.rstrip(','),
                            storage=args.target,
                            mode='snapshot',
                            compress='lzo',
                            mailto=args.notify,
                            mailnotification='always')
                    except:
                        print('{0} ERROR: Failed to submit backup job to node {1}. Skipping!'.format(
                            str(datetime.now()), node['node']))
            else:
                print('{0} WARN: Backup target not found on node {1}'.format(
                    str(datetime.now()), node['node']))

    for vm in args.vmname:
        if vm not in backup_list:
            print('{0} ERROR: VM {1} backup request failed'.format(
                str(datetime.now()), vm))
node_filter = cli_options.node
balance_map = {}
filtered_balance_map = {}

proxmox = ProxmoxAPI(PROXMOX['HOST'], user=PROXMOX['USER'],
                     password=PROXMOX['PASSWORD'], verify_ssl=False)

for node in proxmox.nodes.get():
    n_name = node['node']
    balance_map[n_name] = {}
    if node_filter is not None and n_name == node_filter:
        filtered_balance_map[n_name] = {}
    for vm in proxmox.nodes(node['node']).qemu.get():
        if vm['status'] == 'running':
            try:
                cluster = get_cluster(vm['name'])
                if cluster in balance_map[n_name]:
                    balance_map[n_name][cluster] += 1
                else:
                    balance_map[n_name][cluster] = 1
                if n_name == node_filter:
                    if cluster in filtered_balance_map[n_name]:
                        filtered_balance_map[n_name][cluster] += 1
                    else:
                        filtered_balance_map[n_name][cluster] = 1
            except ValueError:
                pass
                            onboot=int(module.params['onboot']),
                            cpuunits=module.params['cpuunits'],
                            nameserver=module.params['nameserver'],
                            searchdomain=module.params['searchdomain'],
                            force=int(module.params['force']))
            module.exit_json(changed=True, msg="deployed VM %s from template %s"
                             % (vmid, module.params['ostemplate']))
        except Exception as e:
            module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))

    elif state == 'started':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)
            if start_instance(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                if module.params['force']:
def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_host=dict(required=True),
            api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])),
            api_token_id=dict(no_log=True), api_token_secret=dict(no_log=True),
            api_user=dict(required=True),
            vmid=dict(type='int', required=False),
            validate_certs=dict(type='bool', default=False),
            node=dict(), pool=dict(), password=dict(no_log=True),
            hostname=dict(), ostemplate=dict(), disk=dict(type='str'),
            cores=dict(type='int'), cpus=dict(type='int'), memory=dict(type='int'),
            swap=dict(type='int'), netif=dict(type='dict'), mounts=dict(type='dict'),
            ip_address=dict(), onboot=dict(type='bool'),
            features=dict(type='list', elements='str'),
            storage=dict(default='local'), cpuunits=dict(type='int'),
            nameserver=dict(), searchdomain=dict(),
            timeout=dict(type='int', default=30),
            force=dict(type='bool', default=False), purge=dict(type='bool', default=False),
            state=dict(default='present',
                       choices=['present', 'absent', 'stopped', 'started', 'restarted']),
            pubkey=dict(type='str', default=None),
            unprivileged=dict(type='bool', default=False),
            description=dict(type='str'), hookscript=dict(type='str'),
            proxmox_default_behavior=dict(type='str', default='no_defaults',
                                          choices=['compatibility', 'no_defaults']),
            clone=dict(type='int'),
            clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
        ),
        required_if=[
            ('state', 'present', ['node', 'hostname']),
            # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
            # either clone a container or create a new one from a template file.
            ('state', 'present', ('clone', 'ostemplate'), True),
        ],
        required_together=[('api_token_id', 'api_token_secret')],
        required_one_of=[('api_password', 'api_token_id')],
        # Creating a new container is done either by cloning an existing one, or based on a template.
        mutually_exclusive=[('clone', 'ostemplate')],
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    state = module.params['state']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    api_token_id = module.params['api_token_id']
    api_token_secret = module.params['api_token_secret']
    api_user = module.params['api_user']
    vmid = module.params['vmid']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    disk = module.params['disk']
    cpus = module.params['cpus']
    memory = module.params['memory']
    swap = module.params['swap']
    storage = module.params['storage']
    hostname = module.params['hostname']
    if module.params['ostemplate'] is not None:
        template_store = module.params['ostemplate'].split(":")[0]
    timeout = module.params['timeout']
    clone = module.params['clone']

    if module.params['proxmox_default_behavior'] == 'compatibility':
        old_default_values = dict(
            disk="3",
            cores=1,
            cpus=1,
            memory=512,
            swap=0,
            onboot=False,
            cpuunits=1000,
        )
        for param, value in old_default_values.items():
            if module.params[param] is None:
                module.params[param] = value

    auth_args = {'user': api_user}
    if not api_token_id:
        auth_args['password'] = api_password
    else:
        auth_args['token_name'] = api_token_id
        auth_args['token_value'] = api_token_secret

    try:
        proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
        global VZ_TYPE
        VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc'
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    # If vmid not set get the Next VM id from ProxmoxAPI
    # If hostname is set get the VM id from ProxmoxAPI
    if not vmid and state == 'present':
        vmid = get_nextvmid(module, proxmox)
    elif not vmid and hostname:
        hosts = get_vmid(proxmox, hostname)
        if len(hosts) == 0:
            module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state)
        vmid = hosts[0]
    elif not vmid:
        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)

    # Create a new container
    if state == 'present' and clone is None:
        try:
            if get_instance(proxmox, vmid) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
            # If no vmid was passed, there cannot be another VM named 'hostname'
            if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
                module.exit_json(changed=False,
                                 msg="VM with hostname %s already exists and has ID number %s"
                                 % (hostname, get_vmid(proxmox, hostname)[0]))
            elif not node_check(proxmox, node):
                module.fail_json(msg="node '%s' does not exist in cluster" % node)
            elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
                module.fail_json(msg="ostemplate '%s' does not exist on node %s and storage %s"
                                 % (module.params['ostemplate'], node, template_store))
        except Exception as e:
            module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}"
                             .format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))

        try:
            create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
                            cores=module.params['cores'], pool=module.params['pool'],
                            password=module.params['password'], hostname=module.params['hostname'],
                            ostemplate=module.params['ostemplate'], netif=module.params['netif'],
                            mounts=module.params['mounts'], ip_address=module.params['ip_address'],
                            onboot=ansible_to_proxmox_bool(module.params['onboot']),
                            cpuunits=module.params['cpuunits'],
                            nameserver=module.params['nameserver'],
                            searchdomain=module.params['searchdomain'],
                            force=ansible_to_proxmox_bool(module.params['force']),
                            pubkey=module.params['pubkey'],
                            features=",".join(module.params['features']) if module.params['features'] is not None else None,
                            unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
                            description=module.params['description'],
                            hookscript=module.params['hookscript'])

            module.exit_json(changed=True, msg="Deployed VM %s from template %s"
                             % (vmid, module.params['ostemplate']))
        except Exception as e:
            module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))

    # Clone a container
    elif state == 'present' and clone is not None:
        try:
            if get_instance(proxmox, vmid) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
            # If no vmid was passed, there cannot be another VM named 'hostname'
            if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
                module.exit_json(changed=False,
                                 msg="VM with hostname %s already exists and has ID number %s"
                                 % (hostname, get_vmid(proxmox, hostname)[0]))
            if not get_instance(proxmox, clone):
                module.exit_json(changed=False, msg="Container to be cloned does not exist")
        except Exception as e:
            module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}"
                             .format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))

        try:
            create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
            module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
        except Exception as e:
            module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))

    elif state == 'started':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)
            if start_instance(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                if module.params['force']:
                    if umount_instance(module, proxmox, vm, vmid, timeout):
                        module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
                else:
                    module.exit_json(changed=False,
                                     msg=("VM %s is already shutdown, but mounted. "
                                          "You can use force option to umount it.") % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
            if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']):
                module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
        except Exception as e:
            module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'restarted':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
                    getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
                module.exit_json(changed=False, msg="VM %s is not running" % vmid)
            if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and
                    start_instance(module, proxmox, vm, vmid, timeout)):
                module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
        except Exception as e:
            module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'absent':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                module.exit_json(changed=False,
                                 msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
            delete_params = {}
            if module.params['purge']:
                delete_params['purge'] = 1
            taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid, **delete_params)
            while timeout:
                if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                        proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
                    module.exit_json(changed=True, msg="VM %s removed" % vmid)
                timeout -= 1
                if timeout == 0:
                    module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
                time.sleep(1)
        except Exception as e:
            module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
                            onboot=int(module.params['onboot']),
                            cpuunits=module.params['cpuunits'],
                            nameserver=module.params['nameserver'],
                            searchdomain=module.params['searchdomain'],
                            force=int(module.params['force']))
            module.exit_json(changed=True, msg="deployed VM %s from template %s"
                             % (vmid, module.params['ostemplate']))
        except Exception as e:
            module.fail_json(msg="creation of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'started':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)
            if start_instance(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted':
                if module.params['force']:
class ProxmoxBroker(object):

    def __init__(self, backend, host, user, **kwargs):
        """A wrapper object for ProxmoxAPI.

        :param backend: the backend of proxmoxer
        :param host: the host of proxmox server
        :param user: the api user name of proxmox server (default 'root')
        :param kwargs: other args ProxmoxAPI accepted
        :returns: ProxmoxBroker -- ProxmoxBroker object for module usage.
        """
        # here timeout is task timeout, not connection timeout
        self.timeout = kwargs.pop('timeout', 30)
        self.backend = backend
        self.proxmox = ProxmoxAPI(host, backend=backend, user=user, **kwargs)

    def _proxmox_node(self, node):
        """Get ProxmoxResource object for given node.

        :param node: the node name
        :returns: ProxmoxResource -- ProxmoxResource object for given node.
        """
        return self.proxmox.nodes(node)

    def _is_int(self, string):
        """Verify whether the given string can be converted to an integer.

        :param string: the given string
        :returns: bool -- whether the given string can be converted to an integer.
        """
        try:
            int(string)
            return True
        except ValueError:
            return False

    def _is_node_valid(self, node):
        """Verify whether the given node can be found in the proxmox cluster.

        :param node: the node name
        :returns: bool -- whether the given node can be found in the proxmox cluster.
        """
        for nd in self.proxmox.nodes.get():
            if nd['node'] == node:
                return True
        return False

    def _get_snapshot(self, node, vmid, snapname):
        """Get snapshot resource of given instance.

        :param node: the node name of instance
        :param vmid: the vmid of instance
        :param snapname: the snapshot name
        :returns: ProxmoxResource -- snapshot resource of the instance.
        """
        for snap in self._proxmox_node(node).qemu(vmid).snapshot.get():
            if snap.get('name') == snapname:
                return snap
        return None

    def _wait_until_timeout(self, node, taskid, vmstatus={}):
        """Wait until a task completes and meets the expected vm status.

        :param node: the node name of the task
        :param taskid: the taskid
        :param vmstatus: expected vm status, e.g. {"vmid": "foo", "status": "bar"}
        :returns: (bool, string) -- task result, message
        """
        if self.backend != 'https':
            taskid = [data for data in taskid.split('\n') if 'UPID' in data][0]
        proxmox_node = self._proxmox_node(node)
        timeout = self.timeout
        while timeout >= 0:
            task_status = proxmox_node.tasks(taskid).status.get()
            if (task_status['status'] == 'stopped' and
                    task_status['exitstatus'] == 'OK'):
                if not vmstatus:
                    return True, "OK"
                else:
                    if vmstatus['status'] in ('absent', 'present'):
                        vms = [vm for vm in proxmox_node.qemu().get()
                               if vm['vmid'] == vmstatus['vmid']]
                        if vmstatus['status'] == 'absent' and len(vms) == 0:
                            return True, "OK"
                        elif vmstatus['status'] == 'present' and len(vms) == 1:
                            return True, vms[0]
                    else:
                        vm = proxmox_node.qemu(vmstatus['vmid']).status.current.get()
                        if vm['status'] == vmstatus['status']:
                            return True, vm
            timeout = timeout - 1
            if timeout == 0:
                msg = proxmox_node.tasks(taskid).log.get()[:1]
                return False, msg
            time.sleep(1)
        return False, msg

    def get_instance(self, vmid_or_name, node=None):
        """Get info of an instance.

        :param vmid_or_name: the instance name or vmid
        :param node: node name of the instance (default None)
        :returns: (bool, string) -- (result of the task, vm info if success
                  or error message if fail)
        """
        if self._is_int(vmid_or_name):
            field = 'vmid'
            vmid_or_name = int(vmid_or_name)
        else:
            field = 'name'
            vmid_or_name = str(vmid_or_name)

        vms = []
        for vm in self.proxmox.cluster.resources.get(type='vm'):
            if vm.get(field) == vmid_or_name and (not node or vm.get('node') == node):
                vms.append(vm)

        if len(vms) == 1:
            return True, vms[0]
        elif len(vms) == 0:
            return False, "No instance with name or id %s found" % vmid_or_name
        else:
            return (False,
                    "More than one instance with name or id %s found" % vmid_or_name)

    def clone_instance(self, name, template, node):
        """Create new instance based on a template.

        Create a new instance based on the given template; it will use max_id+1
        as the vmid, and will wait until the task is done and the vm is present
        on the node.

        :param name: the new instance name
        :param template: the template name or vmid
        :param node: node name for the new instance
        :returns: (bool, bool, string) -- (result of the task, changed or not
                  on proxmox, message for user)
        """
        existed, _ = self.get_instance(name, node)
        if existed:
            return False, False, "VM with name = %s already exists" % name
        if not self._is_node_valid(node):
            return False, False, "node '%s' does not exist in cluster" % node

        template_existed, t_vm = self.get_instance(template, node=node)
        if not template_existed:
            return False, False, "%s does not exist" % template
        elif template_existed and t_vm['template'] == 0:
            return False, False, "%s is not a valid template" % template
        else:
            proxmox_node = self._proxmox_node(node)
            next_id = max([vm['vmid'] for vm in proxmox_node.qemu.get()]) + 1
            taskid = proxmox_node.qemu(t_vm['vmid']).clone.post(newid=next_id, name=name)
            expected_status = {"vmid": next_id, "status": 'present'}
            result, log = self._wait_until_timeout(node, taskid, expected_status)
            if not result:
                return (False, True, "Reached timeout while waiting for clone "
                        "VM, last line in task before timeout %s" % log)
            else:
                return True, True, "cloned"

    def start_instance(self, name, node=None):
        """Start an instance.

        Start an instance with the given name or id; it will wait until the task
        is done and the vm is in 'running' status.

        :param name: the instance name or id
        :param node: node name for the instance, (default None)
        :returns: (bool, bool, string) -- (result of the task, changed or not
                  on proxmox, message for user)
        """
        rc, msg = self.get_instance(name, node)
        if not rc:
            return (rc, False, msg)
        elif msg['status'] == 'running':
            msg = "VM %s is already running" % name
            return True, False, msg
        else:
            vm = msg
            proxmox_node = self._proxmox_node(vm['node'])
            taskid = proxmox_node.qemu(vm['vmid']).status.start.post()
            expected_status = {"vmid": vm['vmid'], "status": 'running'}
            success, log = self._wait_until_timeout(vm['node'], taskid, expected_status)
            if not success:
                return (False, True, "Reached timeout while waiting for "
                        "starting VM, last line in task before "
                        "timeout %s" % log)
            else:
                return True, True, "started"

    def stop_instance(self, name, node=None, force=False):
        """Stop an instance.

        Stop an instance with the given name or id; it will wait until the task
        is done and the vm is in 'stopped' status. If force=True, it will call
        stop for the instance, otherwise it will call (ACPI) shutdown.

        :param name: the instance name or id
        :param node: node name for the instance, (default None)
        :param force: stop (True) or shutdown (False), (default False)
        :returns: (bool, bool, string) -- (result of the task, changed or not
                  on proxmox, message for user)
        """
        rc, msg = self.get_instance(name, node)
        if not rc:
            return rc, False, msg
        elif msg['status'] == 'stopped':
            msg = "VM %s is already stopped" % name
            return True, False, msg
        else:
            vm = msg
            proxmox_node = self._proxmox_node(vm['node'])
            if force:
                taskid = proxmox_node.qemu(vm['vmid']).status.stop.post()
            else:
                taskid = proxmox_node.qemu(vm['vmid']).status.shutdown.post()
            expected_status = {"vmid": vm['vmid'], "status": 'stopped'}
            success, log = self._wait_until_timeout(vm['node'], taskid, expected_status)
            if not success:
                return (False, True, "Reached timeout while waiting for "
                        "stopping VM, last line in task before "
                        "timeout %s" % log)
            else:
                return True, True, "stopped"

    def delete_instance(self, name, node=None, force=False):
        """Delete an instance.

        Delete an instance with the given name or id; it will wait until the
        task is done and the vm is absent on the node. If force=True, it will
        stop the instance first if it is still in 'running' status, otherwise
        the task will fail.

        :param name: the instance name or id
        :param node: node name for the instance, (default None)
        :param force: force delete or not, (default False)
        :returns: (bool, bool, string) -- (result of the task, changed or not
                  on proxmox, message for user)
        """
        rc, msg = self.get_instance(name, node)
        if not rc:
            msg = "VM %s is already absent" % name
            return True, False, msg
        elif msg['status'] != 'stopped' and not force:
            msg = "VM %s is not stopped" % name
            return False, False, msg
        else:
            vm = msg
            proxmox_node = self._proxmox_node(vm['node'])
            if msg['status'] != 'stopped' and force:
                self.stop_instance(name, node, force=True)
            taskid = proxmox_node.qemu(vm['vmid']).delete()
            expected_status = {"vmid": vm['vmid'], "status": 'absent'}
            success, log = self._wait_until_timeout(vm['node'], taskid, expected_status)
            if not success:
                return (False, True, "Reached timeout while waiting for "
                        "deleting VM, last line in task before "
                        "timeout %s" % log)
            else:
                return True, True, "deleted"

    def snapshot_instance(self, name, snapname, node=None):
        """Take a snapshot for an instance.

        :param name: the instance name or id
        :param snapname: the name of the snapshot
        :param node: node name for the instance, (default None)
        :returns: (bool, bool, string) -- (result of the task, changed or not
                  on proxmox, message for user)
        """
        rc, msg = self.get_instance(name, node)
        if not rc:
            return rc, False, msg
        else:
            vm = msg
            snap = self._get_snapshot(vm['node'], vm['vmid'], snapname)
            if snap:
                return True, False, "Snapshot %s exists" % snapname
            proxmox_node = self._proxmox_node(vm['node'])
            taskid = proxmox_node.qemu(vm['vmid']).snapshot().post(
                snapname=snapname, vmstate="1")
            success, log = self._wait_until_timeout(vm['node'], taskid)
            if not success:
                return (False, True, "Reached timeout while waiting for "
                        "snapshot VM, last line in task before "
                        "timeout %s" % log)
            else:
                return True, True, "snapshotted"

    def restore_instance(self, name, snapname, node=None):
        """Restore an instance from a snapshot.
:param name: the instance name or id :param snapname: the name of the snapshot to restore from :param node: node name for the instance, (default None) :returns: (bool, bool,string) -- (result of the task, changed or not on proxmox, message for user) """ rc, msg = self.get_instance(name, node) if not rc: return rc, False, msg else: vm = msg snap = self._get_snapshot(vm['node'], vm['vmid'], snapname) if not snap: return False, False, "Snapshot %s not found" % snapname proxmox_node = self._proxmox_node(vm['node']) taskid = proxmox_node.qemu(vm['vmid']).snapshot( snapname).rollback.post() success, log = self._wait_until_timeout(vm['node'], taskid) if not success: return (False, True, "Reached timeout while waiting for " "restore VM, last line in task before " "timeout %s" % log) else: return True, True, "restored"
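# Hedged usage sketch for the ProxmoxBroker wrapper above. The host name,
# credentials and VM/template/node names are placeholders, not values taken
# from any real cluster; only methods defined on the class are used.
if __name__ == '__main__':
    broker = ProxmoxBroker('https', 'pve.example.com', 'root@pam',
                           password='secret', verify_ssl=False, timeout=60)

    # Clone a new instance from a template, start it, then snapshot it.
    ok, changed, msg = broker.clone_instance('web01', 'debian-template', 'node1')
    print(ok, changed, msg)
    ok, changed, msg = broker.start_instance('web01')
    print(ok, changed, msg)
    ok, changed, msg = broker.snapshot_instance('web01', 'after-install')
    print(ok, changed, msg)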
class TestSuite(): proxmox = None serializer = None session = None # noinspection PyMethodOverriding @patch('requests.sessions.Session') def setUp(self, session): response = {'ticket': 'ticket', 'CSRFPreventionToken': 'CSRFPreventionToken'} session.request.return_value = response self.proxmox = ProxmoxAPI('proxmox', user='******', password='******', port=123, verify_ssl=False) self.serializer = MagicMock() self.session = MagicMock() self.session.request.return_value.status_code = 200 self.proxmox._store['session'] = self.session self.proxmox._store['serializer'] = self.serializer def test_get(self): self.proxmox.nodes('proxmox').storage('local').get() eq_(self.session.request.call_args[0], ('GET', 'https://proxmox:123/api2/json/nodes/proxmox/storage/local')) def test_delete(self): self.proxmox.nodes('proxmox').openvz(100).delete() eq_(self.session.request.call_args[0], ('DELETE', 'https://proxmox:123/api2/json/nodes/proxmox/openvz/100')) self.proxmox.nodes('proxmox').openvz('101').delete() eq_(self.session.request.call_args[0], ('DELETE', 'https://proxmox:123/api2/json/nodes/proxmox/openvz/101')) def test_post(self): node = self.proxmox.nodes('proxmox') node.openvz.create(vmid=800, ostemplate='local:vztmpl/debian-6-turnkey-core_12.0-1_i386.tar.gz', hostname='test', storage='local', memory=512, swap=512, cpus=1, disk=4, password='******', ip_address='10.0.100.222') eq_(self.session.request.call_args[0], ('POST', 'https://proxmox:123/api2/json/nodes/proxmox/openvz')) ok_('data' in self.session.request.call_args[1]) data = self.session.request.call_args[1]['data'] eq_(data['cpus'], 1) eq_(data['disk'], 4) eq_(data['hostname'], 'test') eq_(data['ip_address'], '10.0.100.222') eq_(data['memory'], 512) eq_(data['ostemplate'], 'local:vztmpl/debian-6-turnkey-core_12.0-1_i386.tar.gz') eq_(data['password'], 'secret') eq_(data['storage'], 'local') eq_(data['swap'], 512) eq_(data['vmid'], 800) node = self.proxmox.nodes('proxmox1') node.openvz.post(vmid=900, ostemplate='local:vztmpl/debian-7-turnkey-core_12.0-1_i386.tar.gz', hostname='test1', storage='local1', memory=1024, swap=1024, cpus=2, disk=8, password='******', ip_address='10.0.100.111') eq_(self.session.request.call_args[0], ('POST', 'https://proxmox:123/api2/json/nodes/proxmox1/openvz')) ok_('data' in self.session.request.call_args[1]) data = self.session.request.call_args[1]['data'] eq_(data['cpus'], 2) eq_(data['disk'], 8) eq_(data['hostname'], 'test1') eq_(data['ip_address'], '10.0.100.111') eq_(data['memory'], 1024) eq_(data['ostemplate'], 'local:vztmpl/debian-7-turnkey-core_12.0-1_i386.tar.gz') eq_(data['password'], 'secret1') eq_(data['storage'], 'local1') eq_(data['swap'], 1024) eq_(data['vmid'], 900) def test_put(self): node = self.proxmox.nodes('proxmox') node.openvz(101).config.set(cpus=4, memory=1024, ip_address='10.0.100.100', onboot=True) eq_(self.session.request.call_args[0], ('PUT', 'https://proxmox:123/api2/json/nodes/proxmox/openvz/101/config')) data = self.session.request.call_args[1]['data'] eq_(data['cpus'], 4) eq_(data['memory'], 1024) eq_(data['ip_address'], '10.0.100.100') eq_(data['onboot'], True) node = self.proxmox.nodes('proxmox1') node.openvz(102).config.put(cpus=2, memory=512, ip_address='10.0.100.200', onboot=False) eq_(self.session.request.call_args[0], ('PUT', 'https://proxmox:123/api2/json/nodes/proxmox1/openvz/102/config')) data = self.session.request.call_args[1]['data'] eq_(data['cpus'], 2) eq_(data['memory'], 512) eq_(data['ip_address'], '10.0.100.200') eq_(data['onboot'], False)
def main(): module = AnsibleModule( argument_spec=dict( api_host=dict(required=True), api_user=dict(required=True), api_password=dict(no_log=True), vmid=dict(required=False), validate_certs=dict(type='bool', default='no'), node=dict(), pool=dict(), password=dict(no_log=True), hostname=dict(), ostemplate=dict(), disk=dict(type='str', default='3'), cores=dict(type='int', default=1), cpus=dict(type='int', default=1), memory=dict(type='int', default=512), swap=dict(type='int', default=0), netif=dict(type='dict'), mounts=dict(type='dict'), ip_address=dict(), onboot=dict(type='bool', default='no'), storage=dict(default='local'), cpuunits=dict(type='int', default=1000), nameserver=dict(), searchdomain=dict(), timeout=dict(type='int', default=30), force=dict(type='bool', default='no'), state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), pubkey=dict(type='str', default=None), unprivileged=dict(type='bool', default='no') ) ) if not HAS_PROXMOXER: module.fail_json(msg='proxmoxer required for this module') state = module.params['state'] api_user = module.params['api_user'] api_host = module.params['api_host'] api_password = module.params['api_password'] vmid = module.params['vmid'] validate_certs = module.params['validate_certs'] node = module.params['node'] disk = module.params['disk'] cpus = module.params['cpus'] memory = module.params['memory'] swap = module.params['swap'] storage = module.params['storage'] hostname = module.params['hostname'] if module.params['ostemplate'] is not None: template_store = module.params['ostemplate'].split(":")[0] timeout = module.params['timeout'] # If password not set get it from PROXMOX_PASSWORD env if not api_password: try: api_password = os.environ['PROXMOX_PASSWORD'] except KeyError as e: module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) global VZ_TYPE VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc' except Exception as e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) # If vmid not set get the Next VM id from ProxmoxAPI # If hostname is set get the VM id from ProxmoxAPI if not vmid and state == 'present': vmid = get_nextvmid(module, proxmox) elif not vmid and hostname: hosts = get_vmid(proxmox, hostname) if len(hosts) == 0: module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state) vmid = hosts[0] elif not vmid: module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) if state == 'present': try: if get_instance(proxmox, vmid) and not module.params['force']: module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) # If no vmid was passed, there cannot be another VM named 'hostname' if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']: module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0])) elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']): module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm') elif not node_check(proxmox, node): module.fail_json(msg="node '%s' not exists in cluster" % node) elif not content_check(proxmox, node, module.params['ostemplate'], template_store): 
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" % (module.params['ostemplate'], node, template_store)) create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, cores=module.params['cores'], pool=module.params['pool'], password=module.params['password'], hostname=module.params['hostname'], ostemplate=module.params['ostemplate'], netif=module.params['netif'], mounts=module.params['mounts'], ip_address=module.params['ip_address'], onboot=int(module.params['onboot']), cpuunits=module.params['cpuunits'], nameserver=module.params['nameserver'], searchdomain=module.params['searchdomain'], force=int(module.params['force']), pubkey=module.params['pubkey'], unprivileged=int(module.params['unprivileged'])) module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) except Exception as e: module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) elif state == 'started': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is already running" % vmid) if start_instance(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s started" % vmid) except Exception as e: module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e)) elif state == 'stopped': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': if module.params['force']: if umount_instance(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) else: module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. " "You can use force option to umount it.") % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']): module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) except Exception as e: module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) elif state == 'restarted': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'): module.exit_json(changed=False, msg="VM %s is not running" % vmid) if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and start_instance(module, proxmox, vm, vmid, timeout)): module.exit_json(changed=True, msg="VM %s is restarted" % vmid) except Exception as e: module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) elif state == 'absent': try: vm = get_instance(proxmox, vmid) if not vm: module.exit_json(changed=False, msg="VM %s does not exist" % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." 
% vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) while timeout: if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): module.exit_json(changed=True, msg="VM %s removed" % vmid) timeout -= 1 if timeout == 0: module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) time.sleep(1) except Exception as e: module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
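# The Ansible modules in this collection all wait for Proxmox tasks with the
# same pattern: poll the task status until it is 'stopped' with exitstatus
# 'OK', or give up after a timeout. A minimal standalone sketch of that loop
# follows; wait_for_task() is not part of the module above, and `proxmox`,
# `node` and `taskid` are assumed to come from an earlier ProxmoxAPI call.
import time

def wait_for_task(proxmox, node, taskid, timeout=30):
    """Poll a Proxmox task until it finishes; return True only on success."""
    while timeout > 0:
        status = proxmox.nodes(node).tasks(taskid).status.get()
        if status['status'] == 'stopped':
            return status.get('exitstatus') == 'OK'
        timeout -= 1
        time.sleep(1)
    return False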
args = parser.parse_args() vmid = str(args.vmid) name = args.name flavor_type = args.flavor storage_type = args.storage # customize flavor instance, flavor = instance_customize(instance, vmid, name, flavor_type) # proxmoxer initialize proxmox_api = ProxmoxAPI(proxmox['host'], user=proxmox['user'], password=proxmox['password'], verify_ssl=proxmox['verify_ssl']) node = proxmox_api.nodes(proxmox['node']) # create kvm machine node.qemu.create(vmid=vmid, name=name, sockets=flavor['sockets'], cores=flavor['cores'], balloon=flavor['balloon'], memory=flavor['memory'], net0=instance['net']) # seeding seed(vmid, instance, proxmox) # set seed iso node.qemu(vmid).config.set(virtio1=instance['hd_seed']) # create root volume if storage_type == 'dir': dir_volume(vmid, instance, proxmox)
verify_ssl=False) def isUserExist(proxmox, user): result = False for item in proxmox.access.users.get(): if item['userid'] == user: result = True return result if not isUserExist(proxmox, 'u' + userID + '@pve'): proxmox.access.users.create(userid='u' + userID + '@pve', password='******') node = proxmox.nodes(hvname) node.qemu.create(vmid=vmid, ostype='l26', name=userID + '.users.justhost.ru', storage='local', memory=512, sockets=1, cores=1, net0='rtl8139,rate=50,bridge=vmbr0', virtio0='local:' + str(vmid) + '/vm-' + str(vmid) + '-disk-1.qcow2,cache=writeback,format=qcow2,size=5G', cdrom='none') # Time for the container archive to unpack time.sleep(10)
cluster_data['status']['ram_free'] += node.get('maxmem', 0) - node.get( 'mem', 0) # update cluster total ram usage percentage if float(cluster_data['status']['ram_total']) > 0: cluster_data['status']['ram_usage'] = 100 * ( float(cluster_data['status']['ram_used']) / float(cluster_data['status']['ram_total'])) # get ksm sharing and cpu usage info from online nodes cpu_usage_combined = 0 for n in cluster_data['nodes']: if cluster_data['nodes'][n]['online'] == 1: cluster_data['status']['nodes_online'] += 1 cpu_usage_combined += cluster_data['nodes'][n]['cpu_usage'] node_status = proxmox.nodes(n).status.get() cluster_data['status']['ksm_sharing'] += node_status['ksm'].get( 'shared', 0) cluster_data['nodes'][n]['ksm_sharing'] += node_status['ksm'].get( 'shared', 0) # calculate cluster total cpu usage percentage if float(cluster_data['status']['nodes_online']) > 0: cluster_data['status']['cpu_usage'] = ( float(cpu_usage_combined) / float(cluster_data['status']['nodes_online'])) # regular expression to match disk strings in vm config disk_pattern = re.compile(r"vm-\d+-disk-\d+") # regular expression to match size block in config string size_pattern = re.compile(r"^size=\d+[T|G|M|K]")
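# Hedged example of how the two regular expressions defined above would be
# applied to a VM disk entry; the config value is a made-up sample in the
# usual 'storage:volume,options' form.
sample_disk = "local-lvm:vm-101-disk-0,size=32G"
m = disk_pattern.search(sample_disk)
if m:
    print("disk volume:", m.group(0))            # -> vm-101-disk-0
for opt in sample_disk.split(','):
    if size_pattern.match(opt):
        print("disk size:", opt.split('=')[1])   # -> 32G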
proxmox = ProxmoxAPI(nodes[hvname]['name'], user='******', password=nodes[hvname]['password'], verify_ssl=False) def isUserExist(proxmox, user): result = False for item in proxmox.access.users.get(): if item['userid'] == user: result = True return result if not isUserExist(proxmox, 'u' + userID + '@pve'): proxmox.access.users.create(userid='u' + userID + '@pve', password='******') node = proxmox.nodes(hvname) node.openvz.create(vmid=vmid, ostemplate=ostemplates[4], hostname=userID + '.users.justhost.ru', storage='local', memory=512, swap=0, cpus=1, disk=5, password='******', ip_address='IP', nameserver='46.17.40.200 46.17.46.200') # Time for the container archive to unpack time.sleep(30)
force=int(module.params['force'])) module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) except Exception as e: module.fail_json( msg="creation of VM %s failed with exception: %s" % (vmid, e)) elif state == 'started': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json( msg='VM with vmid = %s does not exist in cluster' % vmid) if proxmox.nodes(vm[0]['node']).openvz( vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is already running" % vmid) if start_instance(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s started" % vmid) except Exception as e: module.fail_json( msg="starting of VM %s failed with exception: %s" % (vmid, e)) elif state == 'stopped': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json( msg='VM with vmid = %s does not exist in cluster' % vmid)
# cluster: # - "cluster1" # - "cluster2" pve_config = yaml.safe_load(open('/root/proxmox/proxmox.yml', 'r')) pve_apiuser = pve_config['apiuser'] pve_apipass = pve_config['apipass'] pve_cluster = pve_config['cluster'] parser = argparse.ArgumentParser(description='Display VM Configuration') parser.add_argument('vmname', nargs='+', help='VM to look up') args = parser.parse_args() vm_found = None for cluster in pve_cluster: pve = ProxmoxAPI(cluster, user=pve_apiuser, password=pve_apipass, verify_ssl=False) for node in pve.nodes.get(): for vm in pve.nodes(node['node']).qemu.get(): if vm['name'] in args.vmname: # Print a blank line between matching VMs if vm_found: print() vm_found = True # Print VM configuration vmconfig = pve.nodes(node['node']).qemu(vm['vmid']).get('config') vmconfig.update({'cluster' : cluster, 'node' : node['node']}) for k, v in sorted(vmconfig.items()): print('{0:>10}: {1}'.format(k, v))
def process(self, id): localport = self.utils.open_port(id, "8006") try: if localport != None: ip = "localhost" port = str(localport) else: ip = str(self.store.get_attr("base", id, "base.net.ip")).strip() port = "8006" proxmox = ProxmoxAPI( ip, port=port, user=str(self.store.get_attr( "base", id, "base.ssh.user")).strip() + '@pam', password=str( self.store.get_attr("base", id, "base.ssh.password")).strip(), verify_ssl=False) self.store.set_attr("module", id, "module.discover.proxmox", "Yes") for node in proxmox.nodes.get(): self.store.set_attr("base", id, "base.name", node['node']) arp = self.arp_table(id) try: for vm in proxmox.nodes(node['node']).qemu.get(): k = self.find(vm["vmid"], id) self.store.set_attr("base", k, "base.name", vm["name"]) self.store.set_attr("base", k, "base.proxmox.id", vm["vmid"]) self.store.set_attr("base", k, "base.core.schema", "VM") for i in proxmox.nodes(node['node']).qemu( vm["vmid"]).config.get(): if 'net' in i: try: eth = i self.store.set_attr( "base", k, "base.net.eth", eth) mac = proxmox.nodes(node['node']).qemu( vm["vmid"]).config.get()[i].split( "=")[1].split(",")[0] self.store.set_attr( "base", k, "base.net.mac", mac) ip = arp[mac] self.store.set_attr( "base", k, "base.net.ip", ip) except: pass except: pass try: for vm in proxmox.nodes(node['node']).lxc.get(): k = self.find(vm["vmid"], id) self.store.set_attr("base", k, "base.name", vm["name"]) self.store.set_attr("base", k, "base.proxmox.id", vm["vmid"]) self.store.set_attr("base", k, "base.core.schema", "Container") for i in proxmox.nodes(node['node']).lxc( vm["vmid"]).config.get(): if 'net' in i: try: eth = i self.store.set_attr( "base", k, "base.net.eth", eth) mac = proxmox.nodes(node['node']).lxc( vm["vmid"]).config.get()[i].split( ",")[3].split("=")[1] self.store.set_attr( "base", k, "base.net.mac", mac) ip = arp[mac] self.store.set_attr( "base", k, "base.net.ip", ip) except: pass except: pass try: for vm in proxmox.nodes(node['node']).openvz.get(): k = self.find(vm["vmid"], id) self.store.set_attr("base", k, "base.name", vm["name"]) self.store.set_attr("base", k, "base.proxmox.id", vm["vmid"]) self.store.set_attr("base", k, "base.core.schema", "Container") for i in proxmox.nodes(node['node']).openvz( vm["vmid"]).config.get(): if 'net' in i: try: eth = i self.store.set_attr( "base", k, "base.net.eth", eth) mac = proxmox.nodes(node['node']).openvz( vm["vmid"]).config.get()[i].split( ",")[3].split("=")[1] self.store.set_attr( "base", k, "base.net.mac", mac) ip = arp[mac] self.store.set_attr( "base", k, "base.net.ip", ip) except: pass except: pass except: self.store.set_attr("module", id, "module.discover.proxmox", "No") if localport != None: self.utils.close_port(localport)
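# The discovery code above pulls MAC addresses out of 'netX' config values
# with chained split() calls. A hedged, more explicit sketch of the same
# parsing follows; parse_qemu_net() is a helper invented here, and the sample
# value mirrors the 'model=MAC,key=value,...' form those splits assume.
def parse_qemu_net(value):
    """Split a qemu netX config string into an option dict."""
    opts = {}
    for i, part in enumerate(value.split(',')):
        key, _, val = part.partition('=')
        if i == 0:
            # the first option carries the NIC model and its MAC address
            opts['model'], opts['mac'] = key, val
        else:
            opts[key] = val
    return opts

# parse_qemu_net("virtio=DE:AD:BE:EF:00:01,bridge=vmbr0,firewall=1")
# -> {'model': 'virtio', 'mac': 'DE:AD:BE:EF:00:01', 'bridge': 'vmbr0', 'firewall': '1'}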
def main(): module = AnsibleModule( argument_spec=dict( acpi=dict(type='bool', default='yes'), agent=dict(type='bool'), args=dict(type='str', default=None), api_host=dict(required=True), api_user=dict(required=True), api_password=dict(no_log=True), autostart=dict(type='bool', default='no'), balloon=dict(type='int', default=0), bios=dict(choices=['seabios', 'ovmf']), boot=dict(type='str', default='cnd'), bootdisk=dict(type='str'), clone=dict(type='str', default=None), cores=dict(type='int', default=1), cpu=dict(type='str', default='kvm64'), cpulimit=dict(type='int'), cpuunits=dict(type='int', default=1000), delete=dict(type='str', default=None), description=dict(type='str'), digest=dict(type='str'), force=dict(type='bool', default=None), format=dict(type='str', default='qcow2', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk']), freeze=dict(type='bool'), full=dict(type='bool', default='yes'), hostpci=dict(type='dict'), hotplug=dict(type='str'), hugepages=dict(choices=['any', '2', '1024']), ide=dict(type='dict', default=None), keyboard=dict(type='str'), kvm=dict(type='bool', default='yes'), localtime=dict(type='bool'), lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), machine=dict(type='str'), memory=dict(type='int', default=512), migrate_downtime=dict(type='int'), migrate_speed=dict(type='int'), name=dict(type='str'), net=dict(type='dict'), newid=dict(type='int', default=None), node=dict(), numa=dict(type='dict'), numa_enabled=dict(type='bool'), onboot=dict(type='bool', default='yes'), ostype=dict(default='l26', choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'l24', 'l26', 'solaris']), parallel=dict(type='dict'), pool=dict(type='str'), protection=dict(type='bool'), reboot=dict(type='bool'), revert=dict(type='str', default=None), sata=dict(type='dict'), scsi=dict(type='dict'), scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), serial=dict(type='dict'), shares=dict(type='int'), skiplock=dict(type='bool'), smbios=dict(type='str'), snapname=dict(type='str'), sockets=dict(type='int', default=1), startdate=dict(type='str'), startup=dict(), state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), storage=dict(type='str'), tablet=dict(type='bool', default='no'), target=dict(type='str'), tdf=dict(type='bool'), template=dict(type='bool', default='no'), timeout=dict(type='int', default=30), update=dict(type='bool', default='no'), validate_certs=dict(type='bool', default='no'), vcpus=dict(type='int', default=None), vga=dict(default='std', choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), virtio=dict(type='dict', default=None), vmid=dict(type='int', default=None), watchdog=dict(), ), mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')], required_one_of=[('name', 'vmid',)], required_if=[('state', 'present', ['node'])] ) if not HAS_PROXMOXER: module.fail_json(msg='proxmoxer required for this module') api_user = module.params['api_user'] api_host = module.params['api_host'] api_password = module.params['api_password'] clone = module.params['clone'] cpu = module.params['cpu'] cores = module.params['cores'] delete = module.params['delete'] memory = module.params['memory'] name = module.params['name'] newid = module.params['newid'] node = module.params['node'] revert = module.params['revert'] 
sockets = module.params['sockets'] state = module.params['state'] timeout = module.params['timeout'] update = bool(module.params['update']) vmid = module.params['vmid'] validate_certs = module.params['validate_certs'] # If password not set get it from PROXMOX_PASSWORD env if not api_password: try: api_password = os.environ['PROXMOX_PASSWORD'] except KeyError as e: module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) global VZ_TYPE global PVE_MAJOR_VERSION PVE_MAJOR_VERSION = 3 if float(proxmox.version.get()['version']) < 4.0 else 4 except Exception as e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) # If vmid not set get the Next VM id from ProxmoxAPI # If vm name is set get the VM id from ProxmoxAPI if not vmid: if state == 'present' and (not update and not clone) and (not delete and not revert): try: vmid = get_nextvmid(module, proxmox) except Exception as e: module.fail_json(msg="Can't get the next vimd for VM {} automatically. Ensure your cluster state is good".format(name)) else: try: if not clone: vmid = get_vmid(proxmox, name)[0] else: vmid = get_vmid(proxmox, clone)[0] except Exception as e: if not clone: module.fail_json(msg="VM {} does not exist in cluster.".format(name)) else: module.fail_json(msg="VM {} does not exist in cluster.".format(clone)) if clone is not None: if get_vmid(proxmox, name): module.exit_json(changed=False, msg="VM with name <%s> already exists" % name) if vmid is not None: vm = get_vm(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) if not newid: try: newid = get_nextvmid(module, proxmox) except Exception as e: module.fail_json(msg="Can't get the next vimd for VM {} automatically. Ensure your cluster state is good".format(name)) else: vm = get_vm(proxmox, newid) if vm: module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name)) if delete is not None: try: settings(module, proxmox, vmid, node, name, timeout, delete=delete) module.exit_json(changed=True, msg="Settings has deleted on VM {} with vmid {}".format(name, vmid)) except Exception as e: module.fail_json(msg='Unable to delete settings on VM {} with vimd {}: '.format(name, vmid) + str(e)) elif revert is not None: try: settings(module, proxmox, vmid, node, name, timeout, revert=revert) module.exit_json(changed=True, msg="Settings has reverted on VM {} with vmid {}".format(name, vmid)) except Exception as e: module.fail_json(msg='Unable to revert settings on VM {} with vimd {}: Maybe is not a pending task... 
'.format(name, vmid) + str(e)) if state == 'present': try: if get_vm(proxmox, vmid) and not (update or clone): module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid) elif get_vmid(proxmox, name) and not (update or clone): module.exit_json(changed=False, msg="VM with name <%s> already exists" % name) elif not (node, name): module.fail_json(msg='node, name is mandatory for creating/updating vm') elif not node_check(proxmox, node): module.fail_json(msg="node '%s' does not exist in cluster" % node) create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update, acpi=module.params['acpi'], agent=module.params['agent'], autostart=module.params['autostart'], balloon=module.params['balloon'], bios=module.params['bios'], boot=module.params['boot'], bootdisk=module.params['bootdisk'], cpulimit=module.params['cpulimit'], cpuunits=module.params['cpuunits'], description=module.params['description'], digest=module.params['digest'], force=module.params['force'], freeze=module.params['freeze'], hostpci=module.params['hostpci'], hotplug=module.params['hotplug'], hugepages=module.params['hugepages'], ide=module.params['ide'], keyboard=module.params['keyboard'], kvm=module.params['kvm'], localtime=module.params['localtime'], lock=module.params['lock'], machine=module.params['machine'], migrate_downtime=module.params['migrate_downtime'], migrate_speed=module.params['migrate_speed'], net=module.params['net'], numa=module.params['numa'], numa_enabled=module.params['numa_enabled'], onboot=module.params['onboot'], ostype=module.params['ostype'], parallel=module.params['parallel'], pool=module.params['pool'], protection=module.params['protection'], reboot=module.params['reboot'], sata=module.params['sata'], scsi=module.params['scsi'], scsihw=module.params['scsihw'], serial=module.params['serial'], shares=module.params['shares'], skiplock=module.params['skiplock'], smbios1=module.params['smbios'], snapname=module.params['snapname'], startdate=module.params['startdate'], startup=module.params['startup'], tablet=module.params['tablet'], target=module.params['target'], tdf=module.params['tdf'], template=module.params['template'], vcpus=module.params['vcpus'], vga=module.params['vga'], virtio=module.params['virtio'], watchdog=module.params['watchdog']) if not clone: get_vminfo(module, proxmox, node, vmid, ide=module.params['ide'], net=module.params['net'], sata=module.params['sata'], scsi=module.params['scsi'], virtio=module.params['virtio']) if update: module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid)) elif clone is not None: module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) else: module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) except Exception as e: if update: module.fail_json(msg="Unable to update vm {} with vimd {}=".format(name, vmid) + str(e)) elif clone is not None: module.fail_json(msg="Unable to clone vm {} from vimd {}=".format(name, vmid) + str(e)) else: module.fail_json(msg="creation of %s VM %s with vmid %s failed with exception=%s" % (VZ_TYPE, name, vmid, e)) elif state == 'started': try: vm = get_vm(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is already running" % vmid) if start_vm(module, proxmox, vm, vmid, timeout): 
module.exit_json(changed=True, msg="VM %s started" % vmid) except Exception as e: module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e)) elif state == 'stopped': try: vm = get_vm(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': module.exit_json(changed=False, msg="VM %s is already stopped" % vmid) if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']): module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) except Exception as e: module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) elif state == 'restarted': try: vm = get_vm(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': module.exit_json(changed=False, msg="VM %s is not running" % vmid) if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']) and start_vm(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s is restarted" % vmid) except Exception as e: module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) elif state == 'absent': try: vm = get_vm(proxmox, vmid) if not vm: module.exit_json(changed=False, msg="VM %s does not exist" % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) while timeout: if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): module.exit_json(changed=True, msg="VM %s removed" % vmid) timeout -= 1 if timeout == 0: module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) time.sleep(1) except Exception as e: module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e)) elif state == 'current': status = {} try: vm = get_vm(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) current = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] status['status'] = current if status: module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) except Exception as e: module.fail_json(msg="Unable to get vm {} with vmid = {} status: ".format(name, vmid) + str(e))
proxmox_members = proxmox_api.nodes.get() checked_mac=[] ## For each node in the cluster for node_member in proxmox_members: # Check if the member is a node for member in proxmox_cluster: if member["type"]=="node" and node_member["node"] == member["name"]: cluster_member=member # If the host is alive (if it is not, don't query the node) if cluster_member["state"]==1: ## For each VM on the node, retrieve the MAC addresses in use # print "Node: " + node_member["node"] + " && " + cluster_member["name"] vm_on_member = proxmox_api.nodes(cluster_member["name"]).qemu.get() for vm in vm_on_member: # print "VM " + str(vm["vmid"]) config = proxmox_api.nodes(cluster_member["name"]).qemu(vm["vmid"]).config.get() # print config for item in config: if item[:3]=="net": # Get the network MAC address mac=config[item][6:][:17].lower() # print mac ## Check if the MAC address is in the virtual MAC address list for failover in failover_info: # print str(failover["mac"]) + " == " + mac + "!" if str(failover["mac"]) == str(mac): # failover["mac"] can appear more than once because there is one entry per failover IP. # If we move one IP, we must also move every other IP associated with the same MAC address.
from proxmoxer import ProxmoxAPI import yaml # apiuser: "******" # apipass: "******" # cluster: # - "cluster1" # - "cluster2" pve_config = yaml.safe_load(open('/root/proxmox/proxmox.yml', 'r')) pve_apiuser = pve_config['apiuser'] pve_apipass = pve_config['apipass'] pve_cluster = pve_config['cluster'] print('{cluster:<16} {node:<16} {id:<4} {name:<16} {status:<12}'.format( cluster="CLUSTER", node="NODE", id="ID", name="NAME", status="STATUS")) for cluster in pve_cluster: pve = ProxmoxAPI(cluster, user=pve_apiuser, password=pve_apipass, verify_ssl=False) for node in pve.nodes.get(): for vm in pve.nodes(node['node']).qemu.get(): print('{cluster:<16} {node:<16} {id:<4} {name:<16} {status:<12}'.format( cluster=cluster, node=node['node'], id=vm['vmid'], name=vm['name'], status=vm['status']))
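# Both cluster scripts above read the same /root/proxmox/proxmox.yml. Going
# by the commented header, a matching config would look roughly like the
# sketch below; the hostnames and credentials are placeholders.
#
# apiuser: "monitor@pve"
# apipass: "secret"
# cluster:
#   - "pve1.example.com"
#   - "pve2.example.com"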
class TestSuite: proxmox = None serializer = None session = None # noinspection PyMethodOverriding @patch("requests.sessions.Session") def setUp(self, session): response = {"ticket": "ticket", "CSRFPreventionToken": "CSRFPreventionToken"} session.request.return_value = response self.proxmox = ProxmoxAPI("proxmox", user="******", password="******", port=123, verify_ssl=False) self.serializer = MagicMock() self.session = MagicMock() self.session.request.return_value.status_code = 200 self.proxmox._store["session"] = self.session self.proxmox._store["serializer"] = self.serializer def test_get(self): self.proxmox.nodes("proxmox").storage("local").get() eq_(self.session.request.call_args[0], ("GET", "https://proxmox:123/api2/json/nodes/proxmox/storage/local")) def test_delete(self): self.proxmox.nodes("proxmox").openvz(100).delete() eq_(self.session.request.call_args[0], ("DELETE", "https://proxmox:123/api2/json/nodes/proxmox/openvz/100")) self.proxmox.nodes("proxmox").openvz("101").delete() eq_(self.session.request.call_args[0], ("DELETE", "https://proxmox:123/api2/json/nodes/proxmox/openvz/101")) def test_post(self): node = self.proxmox.nodes("proxmox") node.openvz.create( vmid=800, ostemplate="local:vztmpl/debian-6-turnkey-core_12.0-1_i386.tar.gz", hostname="test", storage="local", memory=512, swap=512, cpus=1, disk=4, password="******", ip_address="10.0.100.222", ) eq_(self.session.request.call_args[0], ("POST", "https://proxmox:123/api2/json/nodes/proxmox/openvz")) ok_("data" in self.session.request.call_args[1]) data = self.session.request.call_args[1]["data"] eq_(data["cpus"], 1) eq_(data["disk"], 4) eq_(data["hostname"], "test") eq_(data["ip_address"], "10.0.100.222") eq_(data["memory"], 512) eq_(data["ostemplate"], "local:vztmpl/debian-6-turnkey-core_12.0-1_i386.tar.gz") eq_(data["password"], "secret") eq_(data["storage"], "local") eq_(data["swap"], 512) eq_(data["vmid"], 800) node = self.proxmox.nodes("proxmox1") node.openvz.post( vmid=900, ostemplate="local:vztmpl/debian-7-turnkey-core_12.0-1_i386.tar.gz", hostname="test1", storage="local1", memory=1024, swap=1024, cpus=2, disk=8, password="******", ip_address="10.0.100.111", ) eq_(self.session.request.call_args[0], ("POST", "https://proxmox:123/api2/json/nodes/proxmox1/openvz")) ok_("data" in self.session.request.call_args[1]) data = self.session.request.call_args[1]["data"] eq_(data["cpus"], 2) eq_(data["disk"], 8) eq_(data["hostname"], "test1") eq_(data["ip_address"], "10.0.100.111") eq_(data["memory"], 1024) eq_(data["ostemplate"], "local:vztmpl/debian-7-turnkey-core_12.0-1_i386.tar.gz") eq_(data["password"], "secret1") eq_(data["storage"], "local1") eq_(data["swap"], 1024) eq_(data["vmid"], 900) def test_put(self): node = self.proxmox.nodes("proxmox") node.openvz(101).config.set(cpus=4, memory=1024, ip_address="10.0.100.100", onboot=True) eq_(self.session.request.call_args[0], ("PUT", "https://proxmox:123/api2/json/nodes/proxmox/openvz/101/config")) data = self.session.request.call_args[1]["data"] eq_(data["cpus"], 4) eq_(data["memory"], 1024) eq_(data["ip_address"], "10.0.100.100") eq_(data["onboot"], True) node = self.proxmox.nodes("proxmox1") node.openvz(102).config.put(cpus=2, memory=512, ip_address="10.0.100.200", onboot=False) eq_( self.session.request.call_args[0], ("PUT", "https://proxmox:123/api2/json/nodes/proxmox1/openvz/102/config") ) data = self.session.request.call_args[1]["data"] eq_(data["cpus"], 2) eq_(data["memory"], 512) eq_(data["ip_address"], "10.0.100.200") eq_(data["onboot"], False)
def buildlxc(host): """ Creates lxc in proxmox using given hostresource configuration """ if not exists(host): raise ValueError("Host template is missing. Please create host template") container = Container.getContainer(HOST_CONTAINER) hostresource = container.loadResource(host) #get proxmox user and hypervisor userresource = proxmoxutil.listuser() if userresource is None: raise ValueError("No proxmox user found!! Please use proxmoxutil command to update user credentials") user = userresource.properties[PROPERTIES_USER] password = userresource.properties[PROPERTIES_PASSWORD] authrealm = userresource.properties[PROPERTIES_AUTHREALM] puser = user+'@'+authrealm primary = proxmoxutil.listprimary() if primary is None: raise ValueError("Primary proxmox hypervisor not found!! Please use proxmoxutil command to update primary hypervisor") hypervisor = primary.properties[PROPERTIES_HYPERVISOR] print "Authenticating "+puser +" on "+ hypervisor proxmox = ProxmoxAPI(hypervisor, user=puser, password=password, verify_ssl=False) node = proxmox.nodes(hostresource.properties[HYPERVISOR]) hostname = hostresource.properties[HOSTNAME] vmid = int(hostresource.properties[HOSTID]) ostemplate = str(hostresource.properties[PROPERTIES_OSTEMPLATE]) cpulimit = int(hostresource.properties[PROPERTIES_CPULIMIT]) cpuunits = int(hostresource.properties[PROPERTIES_CPUUNITS]) memory = int(hostresource.properties[PROPERTIES_MEMORY]) swap = int(hostresource.properties[PROPERTIES_SWAP]) storage = hostresource.properties[PROPERTIES_STORAGE] disk = int(hostresource.properties[PROPERTIES_DISK]) disksize="%dG"%(disk) interfaces = hostresource.properties[INTERFACES] i=0 netconfig = dict() for interface in interfaces: print "Configuring %s" %interface netconfig["net"+str(i)] = hostresource.properties[interface] i=i+1 print "Building LXC with the following parameters:" print "Vmid: %d" %vmid print "Template: %s" %ostemplate print "Cpu Limit: %d" %cpulimit print "Cpu Units: %d" %cpuunits print "Memory: %d" %memory print "Swap: %d" %swap print "Storage: %s" %storage print "Disk: %d" %disk node.lxc.create(vmid=vmid, hostname=hostname, ostemplate=ostemplate, password=DEFAULT_PASSWORD, cpuunits=cpuunits, cpulimit=cpulimit, memory=memory, swap=swap, **netconfig) print "Creating LXC....." time.sleep(30) print "Resizing rootfs" node.lxc(vmid).resize.put(disk='rootfs', size=disksize) time.sleep(30) print "LXC created"
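# Hedged usage sketch for buildlxc() above: 'myhost' is a placeholder host
# template name. The function raises ValueError when the host template, the
# proxmox user record, or the primary hypervisor record is missing, so a
# caller would typically guard for that.
if __name__ == '__main__':
    try:
        buildlxc('myhost')
    except ValueError as err:
        print("Could not build container: %s" % err)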
def main(): module = AnsibleModule( argument_spec = dict( api_host = dict(required=True), api_user = dict(required=True), api_password = dict(no_log=True), vmid = dict(required=True), validate_certs = dict(type='bool', default='no'), node = dict(), password = dict(no_log=True), hostname = dict(), ostemplate = dict(), disk = dict(type='str', default='3'), cpus = dict(type='int', default=1), memory = dict(type='int', default=512), swap = dict(type='int', default=0), netif = dict(type='dict'), mounts = dict(type='dict'), ip_address = dict(), onboot = dict(type='bool', default='no'), storage = dict(default='local'), cpuunits = dict(type='int', default=1000), nameserver = dict(), searchdomain = dict(), timeout = dict(type='int', default=30), force = dict(type='bool', default='no'), state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), ) ) if not HAS_PROXMOXER: module.fail_json(msg='proxmoxer required for this module') state = module.params['state'] api_user = module.params['api_user'] api_host = module.params['api_host'] api_password = module.params['api_password'] vmid = module.params['vmid'] validate_certs = module.params['validate_certs'] node = module.params['node'] disk = module.params['disk'] cpus = module.params['cpus'] memory = module.params['memory'] swap = module.params['swap'] storage = module.params['storage'] if module.params['ostemplate'] is not None: template_store = module.params['ostemplate'].split(":")[0] timeout = module.params['timeout'] # If password not set get it from PROXMOX_PASSWORD env if not api_password: try: api_password = os.environ['PROXMOX_PASSWORD'] except KeyError as e: module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) global VZ_TYPE VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc' except Exception as e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) if state == 'present': try: if get_instance(proxmox, vmid) and not module.params['force']: module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']): module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm') elif not node_check(proxmox, node): module.fail_json(msg="node '%s' not exists in cluster" % node) elif not content_check(proxmox, node, module.params['ostemplate'], template_store): module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" % (module.params['ostemplate'], node, template_store)) create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, password = module.params['password'], hostname = module.params['hostname'], ostemplate = module.params['ostemplate'], netif = module.params['netif'], mounts = module.params['mounts'], ip_address = module.params['ip_address'], onboot = int(module.params['onboot']), cpuunits = module.params['cpuunits'], nameserver = module.params['nameserver'], searchdomain = module.params['searchdomain'], force = int(module.params['force'])) module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) except Exception as e: module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e )) elif state == 'started': try: vm = 
get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is already running" % vmid) if start_instance(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s started" % vmid) except Exception as e: module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e )) elif state == 'stopped': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': if module.params['force']: if umount_instance(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) else: module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. " "You can use force option to umount it.") % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']): module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) except Exception as e: module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e )) elif state == 'restarted': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ): module.exit_json(changed=False, msg="VM %s is not running" % vmid) if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and start_instance(module, proxmox, vm, vmid, timeout) ): module.exit_json(changed=True, msg="VM %s is restarted" % vmid) except Exception as e: module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e )) elif state == 'absent': try: vm = get_instance(proxmox, vmid) if not vm: module.exit_json(changed=False, msg="VM %s does not exist" % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) while timeout: if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): module.exit_json(changed=True, msg="VM %s removed" % vmid) timeout = timeout - 1 if timeout == 0: module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' % proxmox_node.tasks(taskid).log.get()[:1]) time.sleep(1) except Exception as e: module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
import time

from proxmoxer import ProxmoxAPI

# pwgen() is assumed to be a project-local password-generator helper.


class KVM():
    def __init__(self, uuid=None):
        self.proxmox = None
        self.node = None
        self.password = None
        self.uuid = uuid
        self.log = []
        self.result = []

    def connect(self, hostname, password, nodename):
        """ Open an API session; returns True on success, False on failure """
        try:
            self.proxmox = ProxmoxAPI(hostname, user='******', password=password, verify_ssl=False)
            self.node = self.proxmox.nodes(nodename)
            self.password = pwgen()
            return True
        except Exception as err:
            self.log.append(str(err))
            return False

    def create(self, vmid=100, hostname='newvm', mem=512, cpus=1):
        """ Create the VM """
        try:
            self.node.qemu.create(vmid=vmid,
                                  ostype='l26',
                                  name=hostname,
                                  storage='local',
                                  memory=mem,
                                  sockets=1,
                                  cores=cpus,
                                  net0='rtl8139,rate=50,bridge=vmbr0',
                                  virtio0='local:' + str(vmid) + '/vm-' + str(vmid) + '-disk-1.qcow2,cache=writeback,mbps_rd=5,mbps_wr=5',
                                  cdrom='none')
        except Exception:
            return False
        finally:
            # Note: this marker is appended whether or not creation succeeded
            self.result.append('<result action="create" state="ready"/>')
        # Give the VM time to be created
        time.sleep(15)
        #self.node.qemu(vmid).config.post(onboot=1)
        return True

    def createstorage(self, vmid=100, size=5):
        """ Create a disk for the VM """
        try:
            self.node.storage.local.content.post(filename='vm-' + str(vmid) + '-disk-1.qcow2',
                                                 format='qcow2',
                                                 size=str(size) + 'G',
                                                 vmid=vmid)
        except BaseException as err:
            self.log.append(str(err))
            return False
        finally:
            self.log.append('<result action="createstorage" state="ready"/>')
        return True

    def startvm(self, vmid=100, args=None):
        """ Start the VM """
        # Clear any previously configured extra QEMU arguments
        self.node.qemu(vmid).config.post(delete="args")
        if args:
            self.node.qemu(vmid).config.post(args=args)
        else:
            # Default to an iPXE boot chained from the provisioning server
            args = '-kernel /root/ipxe.lkrn -append "dhcp && chain http://pxe.justhost.ru/uuid/' + self.uuid + '.boot.php"'
            self.node.qemu(vmid).config.set(args=args)
        self.node.qemu(vmid).status.start.post()

    def stopvm(self, vmid=100):
        """ Stop the VM """
        if self.isVMExist(vmid):
            self.node.qemu(vmid).status.stop.post()

    def deletevm(self, vmid=100):
        """ Delete the VM """
        if self.isVMExist(vmid):
            self.node.qemu(vmid).delete()
            time.sleep(15)

    def isVMExist(self, vmid=100):
        """ Check whether a VM with the given ID exists """
        result = False
        # The openvz endpoint only exists on older Proxmox releases
        for item in self.node.openvz.get():
            if str(item['vmid']) == str(vmid):
                result = True
        for item in self.node.qemu.get():
            if str(item['vmid']) == str(vmid):
                result = True
        return result

    def task(self, tasks):
        """ Batch mode: run a list of actions in order """
        result = []
        flagerror = False
        for item in tasks:
            self.log.append(item)
            if item['action'] == 'connect':
                if self.connect(item['hostname'], item['password'], item['node']):
                    result.append({'action': 'connect', 'status': 'ready'})
                else:
                    flagerror = True
                    result.append({'action': 'connect', 'status': 'error'})
            elif item['action'] == 'create':
                self.createstorage(vmid=int(item['vmid']), size=item['hdd'])
                self.create(vmid=int(item['vmid']), hostname=item['hostname'], mem=item['mem'], cpus=item['cpu'])
                time.sleep(10)
                result.append({'action': 'create', 'status': 'ready'})
            elif item['action'] == 'delete':
                self.log.append('Deleting KVM ' + str(item['vmid']))
                self.deletevm(vmid=item['vmid'])
                time.sleep(10)
                self.log.append('Deleted KVM ' + str(item['vmid']))
                result.append({'action': 'delete', 'status': 'ready'})
            elif item['action'] == 'start':
                if 'args' in item.keys():
                    self.startvm(vmid=item['vmid'], args=item['args'])
                else:
                    self.startvm(vmid=item['vmid'])
                result.append({'action': 'start', 'status': 'ready'})
            elif item['action'] == 'stop':
                self.stopvm(vmid=item['vmid'])
                result.append({'action': 'stop', 'status': 'ready'})
            elif item['action'] == 'wait':
                time.sleep(300)
        if flagerror:
            return {'result': 'kvm', 'status': 'error', 'actions': result}
        else:
            return {'result': 'kvm', 'status': 'ready', 'actions': result}
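# A hedged usage sketch for the KVM batch interface above. The hostname,
# credentials, uuid and vmid values are placeholders introduced for illustration
# and are not taken from the original code.
if __name__ == '__main__':
    kvm = KVM(uuid='00000000-0000-0000-0000-000000000000')
    outcome = kvm.task([
        {'action': 'connect', 'hostname': 'pve.example.com', 'password': 'secret', 'node': 'pve'},
        {'action': 'create', 'vmid': 101, 'hostname': 'newvm', 'mem': 1024, 'cpu': 2, 'hdd': 10},
        {'action': 'start', 'vmid': 101},
    ])
    # 'ready' if every action succeeded, 'error' if the connect step failed
    print(outcome['status'])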