def create_vm_task(user, name, cores, memory, disk, iso):
    """Background job: create a blank VM, register it in STARRS, and set
    its expiration.

    Progress is reported to the job's consumer through set_job_status.
    Deletes the VM if Proxmox never finishes provisioning it.
    """
    with app.app_context():
        job = get_current_job()
        proxmox = connect_proxmox()
        db = connect_db()
        starrs = connect_starrs()
        logging.info('[{}] Creating VM.'.format(name))
        set_job_status(job, 'creating VM')
        vmid = create_vm(proxmox, user, name, cores, memory, disk, iso)
        logging.info(
            '[{}] Waiting until Proxmox is done provisioning.'.format(name))
        set_job_status(job, 'waiting for Proxmox')
        # Poll every 3s, up to `timeout` attempts, for provisioning to finish.
        timeout = 20
        retry = 0
        while retry < timeout:
            if not VM(vmid).is_provisioned():
                retry += 1
                time.sleep(3)
                continue
            break
        if retry == timeout:
            # Never came up; tear it down rather than leave a half-built VM.
            logging.info('[{}] Failed to provision, deleting.'.format(name))
            set_job_status(job, 'failed to provision')
            delete_vm_task(vmid)
            return
        logging.info('[{}] Registering in STARRS.'.format(name))
        set_job_status(job, 'registering in STARRS')
        vm = VM(vmid)
        ip = get_next_ip(starrs, app.config['STARRS_IP_RANGE'])
        register_starrs(starrs, name, app.config['STARRS_USER'], vm.get_mac(),
                        ip)
        set_job_status(job, 'setting VM expiration')
        # NOTE(review): despite the 'get_' name, this appears to record the
        # VM's expiration in the DB — confirm against its definition.
        get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
        logging.info('[{}] VM successfully provisioned.'.format(name))
        set_job_status(job, 'complete')
def process_expiring_vms_task():
    """Background job: warn owners about expiring VMs, stop expired ones,
    and delete VMs that have been expired for at least a week.

    Sends one warning email per pool, and a single summary email to RTPs
    listing all expired VMs across every pool.
    """
    with app.app_context():
        proxmox = connect_proxmox()
        db = connect_db()
        connect_starrs()
        pools = get_pools(proxmox, db)
        expired_vms = []  # accumulated across all pools for the RTP summary
        for pool in pools:
            user = User(pool)
            expiring_vms = []
            vms = user.vms
            for vm in vms:
                vm = VM(vm['vmid'])
                days = (vm.expire - datetime.date.today()).days
                # Warn at fixed checkpoints before expiration and daily
                # during the first week after it.
                if days in [10, 7, 3, 1, 0, -1, -2, -3, -4, -5, -6]:
                    expiring_vms.append([vm.id, vm.name, days])
                # Check the deletion window FIRST. The original tested
                # `days <= 0` before `elif days <= -7`, which made the
                # deletion branch unreachable (any days <= -7 is also <= 0),
                # so week-old expired VMs were never deleted.
                if days <= -7:
                    logging.info(
                        'Deleting {} ({}) as it has been at least a week since expiration.'
                        .format(vm.name, vm.id))
                    send_stop_ssh_tunnel(vm.id)
                    delete_vm_task(vm.id)
                elif days <= 0:
                    expired_vms.append([vm.id, vm.name, days])
                    vm.stop()
            if expiring_vms:
                send_vm_expire_email(pool, expiring_vms)
        if expired_vms:
            send_rtp_vm_delete_email(expired_vms)
def vm_mem(vmid, mem):
    """Set a VM's memory (in GB); increases are checked against the
    owner's usage limits. Returns 403 if the requester may not manage
    this VM."""
    requester = User(session['userinfo']['preferred_username'])
    connect_proxmox()
    # Guard clause: only RTPs or the VM's owner may resize it.
    if not (requester.rtp or int(vmid) in requester.allowed_vms):
        return '', 403
    vm = VM(vmid)
    current_gb = vm.mem // 1024
    if mem >= current_gb:
        # While the VM is up only the delta counts against usage;
        # for a stopped VM the full new allocation is re-checked.
        if vm.qmpstatus in ('running', 'paused'):
            usage_check = requester.check_usage(0, mem - current_gb, 0)
        else:
            usage_check = requester.check_usage(0, mem, 0)
        if usage_check:
            return usage_check
    vm.set_mem(mem * 1024)
    return '', 200
def vm_cpu(vmid, cores):
    """Set a VM's core count; increases are checked against the owner's
    usage limits. Returns 403 if the requester may not manage this VM."""
    requester = User(session['userinfo']['preferred_username'])
    connect_proxmox()
    # Guard clause: only RTPs or the VM's owner may resize it.
    if not (requester.rtp or int(vmid) in requester.allowed_vms):
        return '', 403
    vm = VM(vmid)
    current_cores = vm.cpu
    if cores >= current_cores:
        # While the VM is up only the delta counts against usage;
        # for a stopped VM the full new core count is re-checked.
        if vm.qmpstatus in ('running', 'paused'):
            usage_check = requester.check_usage(cores - current_cores, 0, 0)
        else:
            usage_check = requester.check_usage(cores, 0, 0)
        if usage_check:
            return usage_check
    vm.set_cpu(cores)
    return '', 200
def setup_template_task(template_id, name, user, ssh_key, cores, memory):
    """Background job: clone a template into a new VM, register it in
    STARRS, size it, apply cloud-init, and start it.

    Progress is reported through set_job_status. Deletes the clone if
    Proxmox never finishes provisioning it.
    """
    with app.app_context():
        job = get_current_job()
        proxmox = connect_proxmox()
        starrs = connect_starrs()
        db = connect_db()
        logging.info('[{}] Retrieving template info for template {}.'.format(
            name, template_id))
        # Return value unused; presumably validates the template exists —
        # TODO confirm against get_template's definition.
        get_template(db, template_id)
        logging.info('[{}] Cloning template {}.'.format(name, template_id))
        set_job_status(job, 'cloning template')
        vmid = clone_vm(proxmox, template_id, name, user)
        logging.info(
            '[{}] Waiting until Proxmox is done provisioning.'.format(name))
        set_job_status(job, 'waiting for Proxmox')
        # Poll every 12s, up to `timeout` attempts (~5 min), for the clone.
        timeout = 25
        retry = 0
        while retry < timeout:
            if not VM(vmid).is_provisioned():
                retry += 1
                time.sleep(12)
                continue
            break
        if retry == timeout:
            # Never came up; tear it down rather than leave a half-built VM.
            logging.info('[{}] Failed to provision, deleting.'.format(name))
            set_job_status(job, 'failed to provision')
            delete_vm_task(vmid)
            return
        logging.info('[{}] Registering in STARRS.'.format(name))
        set_job_status(job, 'registering in STARRS')
        vm = VM(vmid)
        ip = get_next_ip(starrs, app.config['STARRS_IP_RANGE'])
        register_starrs(starrs, name, app.config['STARRS_USER'], vm.get_mac(),
                        ip)
        # NOTE(review): despite the 'get_' name, this appears to record the
        # VM's expiration in the DB — confirm against its definition.
        get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
        logging.info('[{}] Setting CPU and memory.'.format(name))
        set_job_status(job, 'setting CPU and memory')
        vm.set_cpu(cores)
        vm.set_mem(memory)
        logging.info('[{}] Applying cloud-init config.'.format(name))
        set_job_status(job, 'applying cloud-init')
        vm.set_ci_user(user)
        vm.set_ci_ssh_key(ssh_key)
        vm.set_ci_network()
        logging.info(
            '[{}] Waiting for STARRS to propogate before starting VM.'.format(
                name))
        set_job_status(job, 'waiting for STARRS')
        job.save_meta()
        # Give DNS/DHCP records time to propagate before first boot.
        time.sleep(90)
        logging.info('[{}] Starting VM.'.format(name))
        set_job_status(job, 'starting VM')
        job.save_meta()
        vm.start()
        logging.info('[{}] Template successfully provisioned.'.format(name))
        set_job_status(job, 'completed')
        job.save_meta()
def delete(self):
    """Delete this pool, and — when the matching Proxmox user exists and
    is not in the 'rtp' group — delete that user as well."""
    proxmox = connect_proxmox()
    proxmox.pools(self.name).delete()
    userid = '{}@csh.rit.edu'.format(self.name)
    existing_users = proxmox.access.users.get()
    if any(entry['userid'] == userid for entry in existing_users):
        # RTP accounts are administrative; never remove those.
        if 'rtp' not in proxmox.access.users(userid).get()['groups']:
            proxmox.access.users(userid).delete()
def set_boot_order(self, boot_order):
    """Set the VM's boot device order.

    boot_order: list of friendly device names ('Floppy', 'Hard Disk',
    'CD-ROM', 'Network'), highest priority first. Raises KeyError on an
    unknown name.
    """
    proxmox = connect_proxmox()
    boot_order_lookup = {
        'Floppy': 'a',
        'Hard Disk': 'c',
        'CD-ROM': 'd',
        'Network': 'n'
    }
    # Map each friendly name to its one-letter Proxmox code, e.g.
    # ['Hard Disk', 'CD-ROM'] -> 'cd'. (Replaces the original
    # `for i in range(len(...))` + string `+=` loop with a join.)
    raw_boot_order = ''.join(
        boot_order_lookup[device] for device in boot_order)
    proxmox.nodes(self.node).qemu(self.id).config.put(boot=raw_boot_order)
def _merge_pending_vms(vms, pending_vms):
    """Overlay pending (still-provisioning) VMs onto a VM list in place.

    A pending VM whose name matches an existing entry updates that entry's
    status and marks it pending; otherwise the pending VM is appended.
    """
    for pending_vm in pending_vms:
        match = next((vm for vm in vms if vm['name'] == pending_vm['name']),
                     None)
        if match:
            match['status'] = pending_vm['status']
            match['pending'] = True
        else:
            vms.append(pending_vm)


def list_vms(user_view=None):
    """Render the VM list page.

    Regular active users see their own VMs (with pending ones merged in);
    RTPs see the cached list of all pools, or a single user's VMs when
    `user_view` is given. Non-RTPs requesting another user's view get 403.
    The identical pending-merge loop that was duplicated for both the
    rtp-view and self-view branches now lives in _merge_pending_vms.
    """
    user = User(session['userinfo']['preferred_username'])
    rtp_view = False
    connect_proxmox()
    if user_view and not user.rtp:
        abort(403)
    elif user_view and user.rtp:
        user_view = User(user_view)
        vms = user_view.vms
        _merge_pending_vms(vms, user_view.pending_vms)
        rtp_view = user_view.name
    elif user.rtp:
        # NOTE(review): `db` is not defined in this function — presumably a
        # module-level handle; confirm it is in scope at import time.
        vms = get_pool_cache(db)
        rtp_view = True
    else:
        if user.active:
            vms = user.vms
            _merge_pending_vms(vms, user.pending_vms)
        else:
            # Sentinel the template uses to render an inactive-account notice.
            vms = 'INACTIVE'
    return render_template('list_vms.html',
                           user=user,
                           rtp_view=rtp_view,
                           vms=vms)
def set_boot_order(self, boot_order):
    """Set the VM's boot device order.

    Accepts either the legacy format (a list of friendly device names:
    'Floppy', 'Hard Disk', 'CD-ROM', 'Network') or the newer Proxmox
    format (a list of raw device identifiers, e.g. ['scsi0', 'ide2']).
    """
    proxmox = connect_proxmox()
    boot_order_lookup = {
        'Floppy': 'a',
        'Hard Disk': 'c',
        'CD-ROM': 'd',
        'Network': 'n'
    }
    # Check if legacy format: every entry is a known friendly name.
    # (`in boot_order_lookup` tests keys directly; `.keys()` was redundant,
    # and the manual `+=` loop is now a join.)
    if all(order in boot_order_lookup for order in boot_order):
        raw_boot_order = ''.join(
            boot_order_lookup[order] for order in boot_order)
    else:
        raw_boot_order = f"order={';'.join(boot_order)}"
    proxmox.nodes(self.node).qemu(self.id).config.put(boot=raw_boot_order)
def setup_template_task(template_id, name, user, ssh_key, cores, memory):
    """Background job: clone a template, register it in STARRS, size it,
    apply cloud-init, and start it.

    Status is reported through `job.meta['status']`. Every status change
    is now followed by `job.save_meta()` so pollers actually see it — the
    original skipped the save for two of the statuses.
    """
    with app.app_context():
        job = get_current_job()
        proxmox = connect_proxmox()
        starrs = connect_starrs()
        db = connect_db()
        print("[{}] Retrieving template info for template {}.".format(
            name, template_id))
        # Return value unused (was assigned to a dead `template` local);
        # presumably validates the template exists — TODO confirm.
        get_template(db, template_id)
        print("[{}] Cloning template {}.".format(name, template_id))
        job.meta['status'] = 'cloning template'
        job.save_meta()
        vmid, mac = clone_vm(proxmox, template_id, name, user)
        print("[{}] Registering in STARRS.".format(name))
        job.meta['status'] = 'registering in STARRS'
        job.save_meta()
        ip = get_next_ip(starrs, app.config['STARRS_IP_RANGE'])
        register_starrs(starrs, name, app.config['STARRS_USER'], mac, ip)
        # NOTE(review): despite the 'get_' name, this appears to record the
        # VM's expiration in the DB — confirm against its definition.
        get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
        print("[{}] Giving Proxmox some time to finish cloning.".format(name))
        job.meta['status'] = 'waiting for Proxmox'
        job.save_meta()  # was missing: status was never persisted
        time.sleep(15)
        print("[{}] Setting CPU and memory.".format(name))
        job.meta['status'] = 'setting CPU and memory'
        job.save_meta()
        vm = VM(vmid)
        vm.set_cpu(cores)
        vm.set_mem(memory)
        print("[{}] Applying cloud-init config.".format(name))
        job.meta['status'] = 'applying cloud-init'
        job.save_meta()  # was missing: status was never persisted
        vm.set_ci_user(user)
        vm.set_ci_ssh_key(ssh_key)
        vm.set_ci_network()
        print(
            "[{}] Waiting for STARRS to propogate before starting VM.".format(
                name))
        job.meta['status'] = 'waiting for STARRS'
        job.save_meta()
        # Give DNS/DHCP records time to propagate before first boot.
        time.sleep(90)
        print("[{}] Starting VM.".format(name))
        job.meta['status'] = 'starting VM'
        job.save_meta()
        vm.start()
        print("[{}] Template successfully provisioned.".format(name))
        job.meta['status'] = 'completed'
        job.save_meta()
def create_vm_task(user, name, cores, memory, disk, iso):
    """Background job: create a blank VM, register it in STARRS, and
    record its expiration. Status is reported via `job.meta['status']`."""
    with app.app_context():
        job = get_current_job()
        proxmox = connect_proxmox()
        db = connect_db()
        starrs = connect_starrs()
        job.meta['status'] = 'creating VM'
        job.save_meta()
        vmid, mac = create_vm(proxmox, user, name, cores, memory, disk, iso)
        job.meta['status'] = 'registering in STARRS'
        job.save_meta()
        register_starrs(starrs, name, app.config['STARRS_USER'], mac,
                        get_next_ip(starrs, app.config['STARRS_IP_RANGE']))
        job.meta['status'] = 'setting VM expiration'
        job.save_meta()
        # NOTE(review): despite the 'get_' name, this appears to record the
        # VM's expiration in the DB — confirm against its definition.
        get_vm_expire(db, vmid, app.config['VM_EXPIRE_MONTHS'])
        job.meta['status'] = 'complete'
        job.save_meta()
def vms(self):
    """Return the user's VMs (pool members) sorted by name.

    Creates the user's pool on demand if it does not exist yet (in which
    case the result is empty). Entries without a 'name' key are filtered
    out.
    """
    proxmox = connect_proxmox()
    try:
        # try to get the users vms from their pool
        vms = proxmox.pools(self.name).get()['members']
    except ResourceException:
        # they likely don't have a pool yet, try to create it
        if is_user(self.name):
            proxmox.pools.post(poolid=self.name,
                               comment='Managed by Proxstar')
        # if created, their pool is empty so return empty array
        return []
    # The original called vms.remove(vm) while iterating vms, which skips
    # the element following each removal; build a filtered list instead.
    vms = [vm for vm in vms if 'name' in vm]
    return sorted(vms, key=lambda k: k['name'])
def process_expiring_vms_task():
    """Background job: email each pool owner about VMs near expiration
    and stop VMs whose expiration date is today."""
    with app.app_context():
        proxmox = connect_proxmox()
        db = connect_db()
        starrs = connect_starrs()
        pools = get_pools(proxmox, db)
        for pool in pools:
            user = User(pool)
            expiring_vms = []
            vms = user.vms
            for vm in vms:
                vm = VM(vm['vmid'])
                days = (vm.expire - datetime.date.today()).days
                # Warn at fixed checkpoints approaching expiration.
                if days in [10, 7, 3, 1, 0]:
                    expiring_vms.append([vm.name, days])
                    if days == 0:
                        vm.stop()
            if expiring_vms:
                # The original emailed the hard-coded user 'com6056' (a
                # debugging leftover); notify the pool's owner instead.
                # Also removed a dead `name = vm.name` local.
                send_vm_expire_email(pool, expiring_vms)
def shutdown(self):
    """Request a guest shutdown via the Proxmox `status/shutdown` endpoint."""
    connect_proxmox().nodes(self.node).qemu(self.id).status.shutdown.post()
def stop(self):
    """Stop the VM via the Proxmox `status/stop` endpoint."""
    connect_proxmox().nodes(self.node).qemu(self.id).status.stop.post()
def set_mem(self, mem):
    """Set the VM's `memory` config value in Proxmox."""
    connect_proxmox().nodes(self.node).qemu(self.id).config.put(memory=mem)
def set_cpu(self, cores):
    """Set the VM's core count; the VM is always kept on a single socket."""
    vm_api = connect_proxmox().nodes(self.node).qemu(self.id)
    vm_api.config.put(cores=cores, sockets=1)
def delete(self):
    """Delete this VM from its node via the Proxmox API."""
    connect_proxmox().nodes(self.node).qemu(self.id).delete()
def node(self):
    """Name of the cluster node hosting this VM, or None if the VM is not
    found among the cluster's VM resources."""
    cluster_vms = connect_proxmox().cluster.resources.get(type='vm')
    return next((entry['node'] for entry in cluster_vms
                 if entry['vmid'] == int(self.id)), None)
def set_ci_network(self):
    """Point cloud-init's first NIC config (ipconfig0) at DHCP."""
    connect_proxmox().nodes(self.node).qemu(self.id).config.put(
        ipconfig0='ip=dhcp')
def set_ci_ssh_key(self, ssh_key):
    """Install an SSH public key through cloud-init.

    The key is fully URL-encoded (safe='') before being sent, as the
    Proxmox `sshkeys` config parameter expects.
    """
    encoded_key = urllib.parse.quote(ssh_key, safe='')
    connect_proxmox().nodes(self.node).qemu(self.id).config.put(
        sshkeys=encoded_key)
def set_ci_user(self, user):
    """Set the cloud-init login user name (`ciuser`)."""
    connect_proxmox().nodes(self.node).qemu(self.id).config.put(ciuser=user)
def resize_disk(self, disk, size):
    """Grow the given disk by `size` gigabytes (the '+' prefix makes the
    resize relative to the current size)."""
    delta = "+{}G".format(size)
    connect_proxmox().nodes(self.node).qemu(self.id).resize.put(
        disk=disk, size=delta)
def mount_iso(self, iso):
    """Attach an ISO image to the VM's CD-ROM drive (ide2)."""
    cdrom_value = "{},media=cdrom".format(iso)
    connect_proxmox().nodes(self.node).qemu(self.id).config.post(
        ide2=cdrom_value)
def eject_iso(self):
    """Detach any mounted ISO by pointing the CD-ROM drive (ide2) at 'none'."""
    connect_proxmox().nodes(self.node).qemu(self.id).config.post(
        ide2='none,media=cdrom')
def suspend(self):
    """Suspend the VM via the Proxmox `status/suspend` endpoint."""
    connect_proxmox().nodes(self.node).qemu(self.id).status.suspend.post()
def get_config(self):
    """Return this VM's Proxmox configuration dict."""
    return connect_proxmox().nodes(self.node).qemu(self.id).config.get()
def get_info(self):
    """Return this VM's current status data (Proxmox `status/current`)."""
    return connect_proxmox().nodes(self.node).qemu(
        self.id).status.current.get()
def resume(self):
    """Resume a suspended VM via the Proxmox `status/resume` endpoint."""
    connect_proxmox().nodes(self.node).qemu(self.id).status.resume.post()
def start_vnc(self, port):
    """Start a VNC server for this VM on 127.0.0.1 via the QEMU monitor.

    QEMU's `change vnc` command takes a display number, which is the TCP
    port minus the VNC base port 5900.
    """
    display = str(int(port) - 5900)
    connect_proxmox().nodes(self.node).qemu(self.id).monitor.post(
        command="change vnc 127.0.0.1:{}".format(display))