def is_request_in_queue(vm_id, task_type, snapshot_id=None):
    """Check if a task of the given type for the given VM is already queued.

    Looks in both the task_queue table (pending/processing tasks) and the
    request_queue table (requested/verified/approved requests).

    Args:
        vm_id: id of the VM to look for.
        task_type: task/request type constant to match.
        snapshot_id: if given, a task only matches when its parameters carry
            the same snapshot id.

    Returns:
        True when a matching task or request exists, else False.
    """
    # Check task_queue table
    task_data = db((db.task_queue.task_type == task_type) &
                   db.task_queue.status.belongs(TASK_QUEUE_STATUS_PENDING,
                                                TASK_QUEUE_STATUS_PROCESSING)).select()
    for task in task_data:
        params = task.parameters
        if params['vm_id'] == vm_id:
            if snapshot_id is not None:
                # .get() guards against tasks whose parameters lack a snapshot id
                if params.get('snapshot_id') == snapshot_id:
                    return True
            else:
                return True
    # Check if request is present in request_queue table
    _request = db((db.request_queue.parent_id == vm_id) &
                  (db.request_queue.request_type == task_type) &
                  db.request_queue.status.belongs(REQ_STATUS_REQUESTED,
                                                  REQ_STATUS_VERIFIED,
                                                  REQ_STATUS_APPROVED)).select()
    return True if _request else False
def check_vm_resource(request_id):
    """Verify enough free private/public IPs exist to serve a VM request.

    Returns:
        'Success' when resources are available, otherwise a message
        describing what is missing.
    """
    req_data = db.request_queue[request_id]
    security_domain_id = req_data.security_domain
    # VLANs belonging to the security domain of the request.
    vlans = db(db.security_domain.id == security_domain_id)._select(
        db.security_domain.vlan)
    # Count private IPs in those VLANs not already assigned to any VM.
    avl_ip = db((~db.private_ip_pool.id.belongs(
                    db(db.vm_data.private_ip != None)._select(db.vm_data.private_ip))) &
                (db.private_ip_pool.vlan.belongs(vlans))).count()
    message = None
    if req_data.request_type == VM_TASK_CREATE:
        if avl_ip == 0:
            # fix: closing quote was missing in the message text
            message = "No private IPs available for security domain '%s'" % req_data.security_domain.name
        if req_data.public_ip:
            if db(~db.public_ip_pool.id.belongs(
                    db(db.vm_data.public_ip != None)._select(
                        db.vm_data.public_ip))).count() == 0:
                message = "" if message is None else message + ", "
                message += "No public IP available"
    elif req_data.request_type == VM_TASK_CLONE:
        # Cloning needs one free private IP per clone.
        if avl_ip < req_data.clone_count:
            # fix: closing quote was missing in the message text
            message = "%s private IP(s) available for security domain '%s'" % (
                str(avl_ip), req_data.security_domain.name)
    return message if message is not None else 'Success'
def processTaskQueue(task_id):
    """Execute the queued task identified by task_id and record its outcome.

    Dispatches to the handler registered in the global ``task`` map and
    updates task_queue / task_queue_event accordingly. On unexpected errors
    the task's status is set to -1.
    """
    try:
        process = db.task_queue[task_id]
        task_queue_query = db(db.task_queue.id == task_id)
        task_event_query = db((db.task_queue_event.task_id == task_id) &
                              (db.task_queue_event.status != TASK_QUEUE_STATUS_IGNORE))
        # Update attention_time for task in the event table
        task_event_query.update(attention_time=get_datetime())
        # Call the corresponding function from vm_helper
        ret = task[process['task_type']](process['parameters'])
        # On return, update the status and end time in task event table
        task_event_query.update(status=ret[0], end_time=get_datetime())
        if ret[0] == TASK_QUEUE_STATUS_FAILED:
            # For failed task, change task status to Failed; it can be
            # marked for retry by admin later.
            task_queue_query.update(status=TASK_QUEUE_STATUS_FAILED)
            # Update task event with the error message
            task_event_query.update(error=ret[1], status=TASK_QUEUE_STATUS_FAILED)
        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # For successful task, delete the task from queue
            task_queue_query.delete()
        db.commit()
        logger.debug("Task done")
    except Exception as e:
        logger.error(e)
        etype, value, tb = sys.exc_info()
        msg = ''.join(traceback.format_exception(etype, value, tb, 10))
        # fix: the formatted traceback was built but never logged
        logger.error(msg)
        db(db.task_queue.id == task_id).update(status=-1)
def processTaskQueue(task_id):
    """Run one queued task and persist its result.

    Looks the task up in task_queue, calls the matching handler from the
    global ``task`` dispatch map, then updates the queue and event tables.
    Unexpected exceptions mark the task's status as -1.
    """
    try:
        process = db.task_queue[task_id]
        task_queue_query = db(db.task_queue.id == task_id)
        task_event_query = db((db.task_queue_event.task_id == task_id) & (
            db.task_queue_event.status != TASK_QUEUE_STATUS_IGNORE))
        # Update attention_time for task in the event table
        task_event_query.update(attention_time=get_datetime())
        # Call the corresponding function from vm_helper
        ret = task[process['task_type']](process['parameters'])
        # On return, update the status and end time in task event table
        task_event_query.update(status=ret[0], end_time=get_datetime())
        if ret[0] == TASK_QUEUE_STATUS_FAILED:
            # For failed task, change task status to Failed; admin can retry later.
            task_queue_query.update(status=TASK_QUEUE_STATUS_FAILED)
            # Update task event with the error message
            task_event_query.update(error=ret[1], status=TASK_QUEUE_STATUS_FAILED)
        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # For successful task, delete the task from queue
            task_queue_query.delete()
        db.commit()
        logger.debug("Task done")
    except Exception as e:
        logger.error(e)
        etype, value, tb = sys.exc_info()
        msg = ''.join(traceback.format_exception(etype, value, tb, 10))
        # fix: the formatted traceback was discarded; log it for debugging
        logger.error(msg)
        db(db.task_queue.id == task_id).update(status=-1)
def get_migrate_vm_form(vm_id):
    """Build the FORM used to migrate a VM to another host."""
    current_host_id = db(db.vm_data.id == vm_id).select(
        db.vm_data.host_id).first()['host_id']
    # Every host except the one the VM currently runs on is a candidate.
    destination_options = []
    for candidate in db(db.host.id != current_host_id).select():
        destination_options.append(OPTION(candidate.host_ip, _value=candidate.id))
    destination_select = SELECT(*destination_options,
                                **dict(_name='destination_host',
                                       requires=IS_IN_DB(db, 'host.id')))
    form = FORM(TABLE(
        TR('VM Name:', INPUT(_name='vm_name', _readonly=True)),
        TR('Current Host:', INPUT(_name='current_host', _readonly=True)),
        TR('Destination Host:', destination_select),
        TR('', INPUT(_type='submit', _value='Migrate'))))
    form.vars.vm_name = db(db.vm_data.id == vm_id).select(
        db.vm_data.vm_name).first()['vm_name']
    form.vars.current_host = db(db.host.id == current_host_id).select(
        db.host.host_ip).first()['host_ip']
    # Live migration is only offered while the VM is running.
    if is_vm_running(vm_id):
        add_live_migration_option(form)
    return form
def get_host_details(vm_id):
    """Return a dict of affinity hosts configured for the given VM."""
    affinity_row = db(db.host_affinity.vm_id == vm_id).select().first()
    host_details = {}
    if affinity_row is not None:
        # Map each affinity host reference to its host name.
        host_details['available_hosts'] = dict(
            (row.affinity_host, "%s" % (row.affinity_host.host_name))
            for row in db(db.host_affinity.vm_id == affinity_row.vm_id).select())
    return host_details
def get_available_public_ip():
    """Return active public IPs not assigned to any VM or host, or None."""
    used_by_vms = db(db.vm_data.public_ip != None)._select(db.vm_data.public_ip)
    used_by_hosts = db(db.host.public_ip != None)._select(db.host.public_ip)
    free_ips = db((~db.public_ip_pool.id.belongs(used_by_vms)) &
                  (~db.public_ip_pool.id.belongs(used_by_hosts)) &
                  (db.public_ip_pool.is_active == True)).select(
        db.public_ip_pool.ALL, orderby=db.public_ip_pool.public_ip)
    return free_ips if free_ips else None
def overload_memory():
    """Copy the memhog binary to every UP host and launch it if not running."""
    logger.debug("Executing overload memory task")
    path_row = db(db.constants.name == "memory_overload_file_path").select(
        db.constants.value).first()
    binary_dir = path_row.value
    logger.debug(type(binary_dir))
    # Private IPs of all hosts currently marked UP.
    up_host_rows = db((db.host.status == HOST_STATUS_UP) &
                      (db.host.host_ip == db.private_ip_pool.id)).select(
        db.private_ip_pool.private_ip)
    logger.debug(up_host_rows)
    run_cmd = '/memhog >/memoryhog.out &'
    count_cmd = "ps -ef | grep memhog | grep -v grep | awk 'END{print FNR}'"
    for row in up_host_rows:
        logger.debug("overloading memory of" + str(row))
        logger.debug(type(row['private_ip']))
        _check_compile_folder(binary_dir)
        copy_cmd = 'scp ' + str(binary_dir) + '/memhog root@' + str(
            row['private_ip']) + ':/'
        logger.debug('executing' + copy_cmd)
        status = os.system(copy_cmd)
        logger.debug('os.system return value' + str(status))
        # Count running memhog processes on the remote host.
        output = execute_remote_cmd(row['private_ip'], 'root', count_cmd)
        running_count = int(output[0])
        if running_count == 0:
            status = execute_remote_cmd(row['private_ip'], 'root', run_cmd)
            logger.debug(status)
    logger.debug("Completed overload memory task")
def grant_vnc_access(vm_id):
    """Grant VNC access to a VM and mail the connection details to its users.

    Returns:
        A human-readable status message.
    """
    active_vnc = db((db.vnc_access.vm_id == vm_id) &
                    (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()
    if active_vnc > 0:
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        # Throttle: at most MAX_VNC_ALLOWED_IN_A_DAY requests per VM per 24h.
        vnc_count = db((db.vnc_access.vm_id == vm_id) &
                       (db.vnc_access.time_requested >
                        (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY:
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                create_vnc_mapping_in_nat(vm_id)
                vnc_info = db((db.vnc_access.vm_id == vm_id) &
                              (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).select()
                if vnc_info:
                    vm_users = []
                    for user in db(db.user_vm_map.vm_id == vm_id).select(
                            db.user_vm_map.user_id):
                        vm_users.append(user['user_id'])
                    send_email_vnc_access_granted(vm_users,
                                                  vnc_info[0].vnc_server_ip,
                                                  vnc_info[0].vnc_source_port,
                                                  vnc_info[0].vm_id.vm_name,
                                                  vnc_info[0].time_requested)
                else:
                    # fix: bare `raise` with no active exception raises the
                    # wrong error; raise an explicit exception instead.
                    raise Exception('VNC mapping not created for VM %s' % vm_id)
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                msg = 'Some Error Occurred. Please try later'
                log_exception()
    return msg
def get_security_domain_form():
    """Build the SQLFORM.grid for managing security domains.

    Creation is disabled when no free VLAN remains to attach to a new domain.
    """
    db.security_domain.id.readable = False
    grid_fields = (db.security_domain.name, db.security_domain.vlan)
    sort_order = [db.security_domain.id]
    # A new domain needs an unused VLAN; count the ones still free.
    free_vlans = db(~db.vlan.id.belongs(db()._select(db.security_domain.vlan))).count()
    allow_create = free_vlans != 0
    return SQLFORM.grid(db.security_domain,
                        fields=grid_fields,
                        orderby=sort_order,
                        paginate=ITEMS_PER_PAGE,
                        create=allow_create,
                        csv=False,
                        searchable=False,
                        details=False,
                        selectable=False,
                        showbuttontext=False,
                        maxtextlength=30,
                        links=[dict(header='Visibility', body=get_org_visibility)])
def configure_host_by_mac(mac_addr):
    """Allot a private IP to a host (by MAC), add a DHCP entry and register it."""
    allotted_ip = None
    ip_info = db.private_ip_pool(mac_addr=mac_addr)
    if ip_info:
        # This MAC already has an IP reserved in the pool.
        allotted_ip = ip_info.private_ip
    else:
        # Pick a free IP on the host VLAN that no registered host uses yet.
        free_ips = db((~db.private_ip_pool.id.belongs(db()._select(db.host.host_ip))) &
                      (db.private_ip_pool.vlan == HOST_VLAN_ID)).select(
            db.private_ip_pool.private_ip)
        first_free = free_ips.first()
        if first_free:
            ip_info = first_free
            allotted_ip = ip_info['private_ip']
    if allotted_ip:
        logger.debug('Available IP for mac address %s is %s' % (mac_addr, allotted_ip))
        # Host name is derived from the last octet of the allotted IP.
        host_name = 'host' + str(allotted_ip.split('.')[3])
        create_dhcp_entry(host_name, mac_addr, allotted_ip)
        db.host[0] = dict(host_ip=ip_info['id'],
                          host_name=host_name,
                          mac_addr=mac_addr,
                          status=HOST_STATUS_DOWN)
        return 'Host configured. Proceed for PXE boot.'
    else:
        logger.error('Available Private IPs for host are exhausted.')
        return 'Available Private IPs for host are exhausted.'
def process_purge_shutdownvm():
    """Delete VMs that stayed shut down past the warning period.

    VMs that are locked and carry a delete_warning_date are queued for
    deletion if their latest event shows they have been 'Shutdown' for at
    least the configured number of days; otherwise the lock and warning
    are cleared.
    """
    logger.info("ENTERING PURGE SHUTDOWN VM ........")
    vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")
    try:
        # Fetch all the VM's which are locked and whose delete warning date is not null.
        # fix: conditions were combined with Python `and`, which silently
        # dropped the `locked == True` condition; DAL queries must use `&`.
        for vm_data in db((db.vm_data.locked == True) &
                          (db.vm_data.delete_warning_date != None)).select(db.vm_data.ALL):
            daysDiff = (get_datetime() - vm_data.delete_warning_date).days
            if daysDiff >= 0:
                # Inspect only the most recent event for this VM.
                for vm_details in db(db.vm_event_log.vm_id == vm_data.id).select(
                        db.vm_event_log.ALL,
                        orderby=~db.vm_event_log.id,
                        limitby=(0, 1)):
                    daysDiff = (get_datetime() - vm_details.timestamp).days
                    if vm_details.new_value == "Shutdown" and int(daysDiff) >= int(vmShutDownDays):
                        logger.info("Need to delete the VM ID:" + str(vm_data.id))
                        # Enqueue so the scheduler picks up and deletes the VM.
                        add_vm_task_to_queue(vm_data.id, VM_TASK_DELETE)
                    else:
                        logger.info("No need to delete the VM ID:" + str(vm_data.id) +
                                    " as it is in use now. ")
                        db(db.vm_data.id == vm_details.vm_id).update(
                            locked='F', delete_warning_date=None)
            else:
                logger.info("No need to process shutdown VM :" + str(vm_data.id))
    except:
        log_exception()
    finally:
        db.commit()
        logger.debug("EXITING PURGE SHUTDOWN VM ........")
def get_all_orglevel_vm_list():
    """List hosted VMs owned by any user of the current user's organisation."""
    org_user_ids = db(
        auth.user.organisation_id == db.user.organisation_id)._select(db.user.id)
    status_filter = db.vm_data.status.belongs(
        VM_STATUS_RUNNING, VM_STATUS_SUSPENDED, VM_STATUS_SHUTDOWN)
    owner_filter = db.vm_data.owner_id.belongs(org_user_ids)
    vms = db(status_filter & owner_filter).select(db.vm_data.ALL)
    return get_hosted_vm_list(vms)
def get_available_private_ip(security_domain_id):
    """Return free private IPs on the security domain's VLANs, or None."""
    domain_vlans = db(db.security_domain.id == security_domain_id)._select(
        db.security_domain.vlan)
    used_by_vms = db(db.vm_data.private_ip != None)._select(db.vm_data.private_ip)
    used_by_hosts = db(db.host.host_ip != None)._select(db.host.host_ip)
    free_ips = db((~db.private_ip_pool.id.belongs(used_by_vms)) &
                  (~db.private_ip_pool.id.belongs(used_by_hosts)) &
                  (db.private_ip_pool.vlan.belongs(domain_vlans))).select(
        db.private_ip_pool.ALL, orderby=db.private_ip_pool.private_ip)
    return free_ips if free_ips else None
def get_verified_vm_list():
    """List verified/approved VM requests from users of the caller's organisation."""
    # fix: use _select (nested subquery) instead of select(), matching the
    # sibling org-level queries; belongs() expects a subquery, not Rows.
    users_of_same_org = db(
        auth.user.organisation_id == db.user.organisation_id)._select(db.user.id)
    vms = db(((db.vm_data.status == VM_STATUS_VERIFIED) |
              (db.vm_data.status == VM_STATUS_APPROVED)) &
             (db.vm_data.requester_id.belongs(users_of_same_org))).select(db.vm_data.ALL)
    return get_pending_vm_list(vms)
def get_host_details(vm_name):
    """Return the current host and affinity-host choices for the named VM."""
    affinity_row = db(db.host_affinity.vm_name == vm_name).select().first()
    host_details = {}
    logger.debug("host data is : " + str(affinity_row))
    if affinity_row is not None:
        host_details['current_host'] = "%s" % (affinity_row.current_host)
        # Map each affinity record id to its affinity host.
        host_details['available_hosts'] = dict(
            (row.id, "%s" % (row.affinity_host))
            for row in db(db.host_affinity.vm_name == affinity_row.vm_name).select())
    return host_details
def delete_user_cont_access(cont_id, user_id):
    """Revoke a user's access to a container, clearing any ownership roles."""
    cont_data = db.container_data[cont_id]
    # -1 marks the owner/requester role as unassigned.
    if cont_data.owner_id == user_id:
        cont_data.update_record(owner_id=-1)
    if cont_data.requester_id == user_id:
        cont_data.update_record(requester_id=-1)
    # Remove the user <-> container mapping itself.
    db((db.user_container_map.cont_id == cont_id) &
       (db.user_container_map.user_id == user_id)).delete()
def approve_vm_request(vm_id):
    """Mark a VM request approved, grant user access and queue VM creation."""
    db(db.vm_data.id == vm_id).update(status=VM_STATUS_APPROVED)
    vm_data = db(db.vm_data.id == vm_id).select().first()
    add_user_to_vm(vm_data.owner_id, vm_id)
    # The requester also gets access when different from the owner.
    if vm_data.owner_id != vm_data.requester_id:
        add_user_to_vm(vm_data.requester_id, vm_id)
    add_vm_task_to_queue(vm_id, TASK_TYPE_CREATE_VM)
def get_users_with_roles():
    """Return all active users, each annotated with organisation and role list."""
    all_users = db((db.user.registration_key == "") &
                   (db.user.block_user == False)).select()
    for user in all_users:
        user['organisation'] = user.organisation_id.name
        roles = []
        for membership in db(db.user_membership.user_id == user.id).select(
                db.user_membership.group_id):
            # fix: append a single item rather than extend([item])
            roles.append(membership.group_id)
        user['roles'] = roles
    return all_users
def delete_user_vm_access(vm_id, user_id):
    """Revoke a user's access to a VM, clearing ownership roles if held."""
    vm_row = db.vm_data[vm_id]
    # -1 marks the role as unassigned.
    if vm_row.owner_id == user_id:
        vm_row.update_record(owner_id=-1)
    if vm_row.requester_id == user_id:
        vm_row.update_record(requester_id=-1)
    # Drop the user <-> VM mapping itself.
    db((db.user_vm_map.vm_id == vm_id) &
       (db.user_vm_map.user_id == user_id)).delete()
def approve_vm_request(vm_id):
    """Approve the VM request, grant access, and enqueue the creation task."""
    db(db.vm_data.id == vm_id).update(status=VM_STATUS_APPROVED)
    approved_vm = db(db.vm_data.id == vm_id).select().first()
    # Owner always gets access; requester too when they differ.
    users_to_add = [approved_vm.owner_id]
    if approved_vm.owner_id != approved_vm.requester_id:
        users_to_add.append(approved_vm.requester_id)
    for uid in users_to_add:
        add_user_to_vm(uid, vm_id)
    add_vm_task_to_queue(vm_id, TASK_TYPE_CREATE_VM)
def delete_host_from_db(host_id):
    """Remove a host record plus its DHCP entry and RRD scheduler task."""
    host_row = db.host[host_id]
    host_ip = host_row.host_ip.private_ip
    ip_row = db.private_ip_pool(private_ip=host_ip)
    if ip_row:
        remove_dhcp_entry(host_row.host_name, ip_row['private_ip'])
    # Drop the utilisation-RRD scheduler task keyed by this host's IP.
    db(db.scheduler_task.uuid == (UUID_VM_UTIL_RRD + "=" + str(host_ip))).delete()
    del db.host[host_id]
def get_pending_requests():
    """Return pending VM requests: all for moderators, own for other users."""
    if is_moderator():
        pending_query = db(db.vm_data.status == VM_STATUS_REQUESTED)
    else:
        pending_query = db((db.vm_data.status == VM_STATUS_REQUESTED) &
                           (db.vm_data.owner_id == auth.user.id))
    return get_pending_vm_list(pending_query.select(db.vm_data.ALL))
def get_my_task_list(task_status, task_num):
    """Gets list of tasks requested by the user or task on any of user's VMs."""
    # Subquery: all VMs mapped to the current user.
    my_vm_ids = db(auth.user.id == db.user_vm_map.user_id)._select(db.user_vm_map.vm_id)
    task_query = db(db.task_queue_event.status.belongs(task_status) &
                    (db.task_queue_event.vm_id.belongs(my_vm_ids) |
                     (db.task_queue_event.requester_id == auth.user.id)))
    events = task_query.select(db.task_queue_event.ALL,
                               distinct=True,
                               orderby=~db.task_queue_event.start_time,
                               limitby=(0, task_num))
    return get_task_list(events)
def check_delete_template(template_id):
    """Try to delete a template; deactivate it instead when VMs still use it.

    Returns True when deletion may proceed, False when only deactivated.
    """
    # Saved templates are removed asynchronously through the task queue.
    if db.vm_data(saved_template=template_id):
        add_vm_task_to_queue(-1, VM_TASK_DELETE_TEMPLATE, {'template_id': template_id})
    # If any VM still references this template, keep it but mark inactive.
    if db.vm_data(template_id=template_id):
        db(db.template.id == template_id).update(is_active=False)
        return False
    return True
def get_all_orglevel_vm_list():
    """Return hosted VMs whose owners belong to the caller's organisation."""
    same_org_users = db(
        auth.user.organisation_id == db.user.organisation_id)._select(db.user.id)
    hosted = db.vm_data.status.belongs(
        VM_STATUS_RUNNING, VM_STATUS_SUSPENDED, VM_STATUS_SHUTDOWN)
    owned_by_org = db.vm_data.owner_id.belongs(same_org_users)
    return get_hosted_vm_list(db(hosted & owned_by_org).select(db.vm_data.ALL))
def get_pending_request_query(statusList):
    """Return a request_queue query filtered by status and the caller's role."""
    status_filter = db.request_queue.status.belongs(statusList)
    if is_moderator():
        # Moderators see every request.
        return db(status_filter)
    if is_orgadmin():
        # Org admins see requests from anyone in their organisation.
        same_org_users = db(
            auth.user.organisation_id == db.user.organisation_id)._select(db.user.id)
        return db(status_filter &
                  db.request_queue.requester_id.belongs(same_org_users))
    # Plain users see only their own requests.
    return db(status_filter & (db.request_queue.owner_id == auth.user.id))
def get_verified_vm_list():
    """List verified or approved VM requests from the caller's organisation."""
    # fix: build a nested subquery with _select for belongs(); plain
    # select() returns Rows, which is not a valid belongs() argument.
    users_of_same_org = db(
        auth.user.organisation_id == db.user.organisation_id)._select(
        db.user.id)
    vms = db(((db.vm_data.status == VM_STATUS_VERIFIED) |
              (db.vm_data.status == VM_STATUS_APPROVED)) &
             (db.vm_data.requester_id.belongs(users_of_same_org))).select(
        db.vm_data.ALL)
    return get_pending_vm_list(vms)
def get_all_orglevel_vm_list():
    """Return hosted VMs mapped to users of the caller's organisation."""
    # Resolve the caller's organisation id first, then all its users.
    my_org_id = db(auth.user.id == db.user.id).select(
        db.user.organisation_id).first()['organisation_id']
    org_users = db(my_org_id == db.user.organisation_id).select(db.user.id)
    hosted = ((db.vm_data.status == VM_STATUS_RUNNING) |
              (db.vm_data.status == VM_STATUS_SUSPENDED) |
              (db.vm_data.status == VM_STATUS_SHUTDOWN))
    vms = db(hosted &
             db.user_vm_map.user_id.belongs(org_users) &
             (db.user_vm_map.vm_id == db.vm_data.id)).select(db.vm_data.ALL)
    return get_hosted_vm_list(vms)
def send_shutdown_email_to_all(): vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED)).select() #data structures to store user data; used in sending email user_vms = {} user_name = {} user_email_ids = set() for vm_data in vms: owner_info = get_user_details(vm_data.owner_id) #adding unique usernames to dict with email_id as key user_name[owner_info[1]] = owner_info[0] #storing VM_name in dict with user email-id as key if owner_info[1] not in user_vms: user_vms[owner_info[1]] = vm_data.vm_name else: user_vms[owner_info[1]] += ", " + vm_data.vm_name #extracting unique emil ids from owner_info user_email_ids.add(owner_info[1]) cc_user_list = [] for user in db(db.user_vm_map.vm_id == vm_data.id).select( db.user_vm_map.user_id): if user.user_id != vm_data.owner_id: user_info = get_user_details(user.user_id) cc_user_list.append(user_info[1]) if user_info[1] not in user_vms: user_vms[user_info[1]] = vm_data.vm_name else: user_vms[user_info[1]] += ", " + vm_data.vm_name user_email_ids.add(user_info[1]) user_name[user_info[1]] = user_info[0] #logger.info("\nUser of VM: " + str(user_info)) logger.info("VM name: " + vm_data.vm_name + "\tOwner: " + str(owner_info[0]) + "\tand other users: " + str(cc_user_list)) logger.info("Sending mail to user_email_ids " + str(user_email_ids)) #iterating on all unique email_ids to send email for email_id in user_email_ids: context = dict(userName=user_name[email_id], userVMs=user_vms[email_id]) logger.info("Sending mail to: " + email_id) logger.info("User VMs: " + user_vms[email_id]) send_email(email_id, BAADAL_SHUTDOWN_SUBJECT, BAADAL_SHUTDOWN_BODY, context) import time time.sleep(30)
def get_all_orglevel_vm_list():
    """Return hosted VMs reachable by any user of the caller's organisation."""
    caller_row = db(auth.user.id == db.user.id).select(db.user.organisation_id).first()
    users_of_same_org = db(
        caller_row['organisation_id'] == db.user.organisation_id).select(db.user.id)
    # Hosted means running, suspended or shut down.
    vms = db(((db.vm_data.status == VM_STATUS_RUNNING) |
              (db.vm_data.status == VM_STATUS_SUSPENDED) |
              (db.vm_data.status == VM_STATUS_SHUTDOWN)) &
             (db.user_vm_map.user_id.belongs(users_of_same_org)) &
             (db.user_vm_map.vm_id == db.vm_data.id)).select(db.vm_data.ALL)
    return get_hosted_vm_list(vms)
def section(): try: treerefs, flatrefs = get_tree() chapnum = request.args[0] if request.args else sorted(treerefs.keys())[0] chapid = db.chapter_titles(db.chapter_titles.num == chapnum).id currsec = request.args[1] if len(request.args) > 0 else treerefs[chapnum][0] title1 = db.chapter_titles(db.chapter_titles.num == chapnum).title pars = db(db.paragraphs.chapter_id == chapid).select().as_list() pars.sort(key=lambda p: (int(p['section']), int(p['subsection']))) prevnode, nextnode = get_nav_refs(chapnum, currsec, flatrefs) print 'chapid =', chapid print 'chapnum =', chapnum secrows = db(db.section_titles.chapter_num == chapid).select().as_list() print 'secrows' pprint(secrows) secrows.sort(key=lambda p: int(p['section_num'])) sectitles = {s['section_num']: s['title'] for s in secrows} print 'sectitles' pprint(sectitles) sections = {} for mysec in treerefs[int(chapnum)]: paragraphs = [] secpars = [p for p in pars if int(p['section']) == int(mysec)] for p in secpars: mypar = {} num = '.'.join([s for s in [str(chapnum), str(mysec), str(p['subsection'])] if s]) mypar['num'] = num mypar['par_title'] = p['display_title'] mypar['text'] = p['body'] print 'parsed text for', num mypar['auds'] = get_audio(p) mypar['images'] = get_images(p) paragraphs.append(mypar) sections[mysec] = paragraphs return {'title1': title1, 'sections': sections, 'sectitles': sectitles, 'prevref': prevnode, 'treerefs': session.treerefs, 'nextref': nextnode, 'currsec': currsec} except Exception: print traceback.format_exc(5)
def reset_host_affinity(vm_id, key):
    """Delete one host-affinity entry unless the VM currently runs on it."""
    logger.debug("inside reset_host_affinity")
    affinity_row = db(db.host_affinity.id == key).select().first()
    logger.debug("host data is : " + str(affinity_row))
    vm_details = get_migrate_vm_details(vm_id)
    logger.debug("vm_details is : " + str(vm_details))
    logger.debug("current host is : " + str(vm_details['current_host']))
    # Refuse deletion while the VM is placed on this affinity host.
    if vm_details['current_host'] in affinity_row['affinity_host']:
        logger.debug("inside if part")
        return "we can not delete this host affinity because currently vm is on this host !!"
    logger.debug("inside else part")
    db(db.host_affinity.id == key).delete()
    db(db.vm_data.id == vm_details['vm_id']).update(affinity_flag=0)
def grant_novnc_access(vm_id):
    """Set up noVNC access for a VM and return the access token.

    Returns:
        The (new or existing) token string, or None when token generation
        failed before a token was produced.
    """
    msg = ""
    # fix: token was unbound at `return token` when an exception occurred
    # before assignment, or when the daily limit was hit.
    token = None
    active_vnc = db((db.vnc_access.vm_id == vm_id) & (
        db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()
    if active_vnc > 0:
        vm_data = db(db.vnc_access.vm_id == vm_id).select().first()
        token = vm_data.token
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        # Throttle: limit the number of VNC requests per VM per 24 hours.
        vnc_count = db((db.vnc_access.vm_id == vm_id) &
                       (db.vnc_access.time_requested >
                        (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY:
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                # Generate a random hex token for the websockify token file.
                f = os.popen('openssl rand -hex 10')
                token = f.read().split("\n")[0]
                f.close()  # fix: popen handle was never closed
                create_novnc_mapping(vm_id, token)
                vm_data = db(db.vm_data.id == vm_id).select().first()
                host_ip = vm_data.host_id.host_ip.private_ip
                vnc_port = vm_data.vnc_port
                vnc = str(vnc_port)
                file_token = str(token) + ":" + " " + str(host_ip) + ":" + str(
                    vnc) + "\n"
                myfile = get_file_append_mode("/home/www-data/token.list")
                myfile.write(file_token)
                command = "ps -ef | grep websockify|awk '{print $2}'"
                port = config.get("NOVNC_CONF", "port")
                server_ip = config.get("NOVNC_CONF", "server_ip")
                return_value = execute_remote_cmd(server_ip, 'root', command)
                return_value = return_value.split()
                # Start websockify only when it is not already running.
                if len(return_value) <= 2:
                    command = "./noVNC/utils/websockify/run --web /root/noVNC --target-config /home/www-data/token.list " + str(
                        server_ip) + ":" + str(port) + " > /dev/null 2>&1 &"
                    return_value = execute_remote_cmd(server_ip, 'root', command)
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                logger.debug('Some Error Occurred. Please try later')
                log_exception()
    logger.debug(msg)
    return token
def get_security_domain_form():
    """Grid form for security domains; creation allowed only with a free VLAN."""
    db.security_domain.id.readable = False
    shown_fields = (db.security_domain.name, db.security_domain.vlan)
    # Creating a domain consumes a VLAN, so require at least one unused VLAN.
    unused_vlans = db(
        ~db.vlan.id.belongs(db()._select(db.security_domain.vlan))).count()
    can_create = unused_vlans > 0
    form = SQLFORM.grid(db.security_domain,
                        fields=shown_fields,
                        orderby=[db.security_domain.id],
                        paginate=ITEMS_PER_PAGE,
                        create=can_create,
                        csv=False,
                        searchable=False,
                        details=False,
                        selectable=False,
                        showbuttontext=False,
                        maxtextlength=30,
                        links=[dict(header='Visibility', body=get_org_visibility)])
    return form
def add_data_into_affinity(params, vm_details):
    """Record the requested affinity hosts for a VM in host_affinity."""
    # Keep only the host-name part before any "(...)" annotation.
    current_host = vm_details['current_host'].split("(")[0]
    # affinity_host may arrive as a whitespace-separated string or a list.
    if isinstance(params['affinity_host'], str):
        requested_hosts = params['affinity_host'].split()
    else:
        requested_hosts = params['affinity_host']
    for host_entry in requested_hosts:
        db(db.vm_data.id == vm_details['vm_id']).update(affinity_flag=1)
        if db.host_affinity(affinity_host=host_entry):
            db(db.host_affinity.affinity_host == host_entry).update(
                affinity_host=host_entry)
        else:
            db.host_affinity.insert(vm_id=vm_details['vm_id'],
                                    vm_name=vm_details['vm_name'],
                                    current_host=current_host,
                                    affinity_host=host_entry)
def delete_vm_info(vm_identity):
    """Release a VM's datastore space and public-IP mapping, then delete it."""
    vm_row = db(db.vm_data.vm_identity == vm_identity).select().first()
    # Return the disk space (own HDD + template HDD) to the datastore.
    if vm_row.HDD is not None:
        reclaimed = int(vm_row.HDD) + int(vm_row.template_id.hdd)
        db(db.datastore.id == vm_row.datastore_id).update(
            used=int(vm_row.datastore_id.used) - reclaimed)
    # Drop the NAT mapping for the public IP, if one was assigned.
    if vm_row.public_ip is not None:
        remove_mapping(vm_row.public_ip.public_ip, vm_row.private_ip.private_ip)
    # Deleting the vm_data row cascades to its references.
    db(db.vm_data.id == vm_row.id).delete()
    return
def delete_vm_info(vm_identity):
    """Free the VM's datastore usage and IP mapping, then remove its record."""
    vm_row = db(db.vm_data.vm_identity == vm_identity).select().first()
    # updating the used entry of database
    if vm_row.HDD is not None:
        total_hdd = int(vm_row.HDD) + int(vm_row.template_id.hdd)
        new_used = int(vm_row.datastore_id.used) - total_hdd
        db(db.datastore.id == vm_row.datastore_id).update(used=new_used)
    if vm_row.public_ip is not None:
        # NOTE(review): this variant passes the reference fields directly,
        # while a sibling version dereferences to .public_ip/.private_ip —
        # confirm which remove_mapping signature is expected.
        remove_mapping(vm_row.public_ip, vm_row.private_ip)
    # this will delete vm_data entry and also its references
    db(db.vm_data.id == vm_row.id).delete()
    return
def get_pending_request_query(statusList):
    """Build the request_queue query visible to the current user's role."""
    in_status = db.request_queue.status.belongs(statusList)
    if is_moderator():
        # Moderators see everything in the given statuses.
        pending = db(in_status)
    elif is_orgadmin():
        # Org admins see requests raised by members of their organisation.
        org_members = db(
            auth.user.organisation_id == db.user.organisation_id)._select(db.user.id)
        pending = db(in_status & db.request_queue.requester_id.belongs(org_members))
    else:
        # Everyone else sees only requests they own.
        pending = db(in_status & (db.request_queue.owner_id == auth.user.id))
    return pending
def get_vm_snapshots(vm_id):
    """Build display rows (with delete/revert action links) for a VM's snapshots."""
    snapshot_rows = []
    for snapshot in db(db.snapshot.vm_id == vm_id).select():
        entry = {'id': snapshot.id,
                 'type': get_snapshot_type(snapshot.type),
                 'name': snapshot.snapshot_name}
        # Only user-created snapshots may be deleted from the UI.
        if snapshot.type == SNAPSHOT_USER:
            entry['delete'] = A(
                IMG(_src=URL('static', 'images/delete-snapshot.gif'),
                    _style='height:20px;weight:20px'),
                _href=URL(r=request, f='delete_snapshot',
                          args=[vm_id, snapshot.id]),
                _title="Delete this snapshot",
                _alt="Delete this snapshot")
        else:
            entry['delete'] = ' '
        entry['revert'] = A(
            IMG(_src=URL('static', 'images/revertTosnapshot.png'),
                _style='height:20px;weight:20px'),
            _href=URL(r=request, f='revert_to_snapshot',
                      args=[vm_id, snapshot.id]),
            _title="Revert to this snapshot",
            _alt="Revert to this snapshot")
        snapshot_rows.append(entry)
    return snapshot_rows
def add_data_into_affinity(params, vm_id):
    """Store the requested affinity hosts for a VM and return them.

    Args:
        params: dict with 'affinity_host' as either a whitespace-separated
            string or a list of host names.
        vm_id: id of the VM the affinity applies to.

    Returns:
        The normalised host list (possibly None when none was supplied).
    """
    # (removed dead commented-out code carried over from an older variant)
    if isinstance(params['affinity_host'], str):
        host_names = params['affinity_host'].split()
    else:
        host_names = params['affinity_host']
    if host_names is not None:
        for host_name in host_names:
            db(db.vm_data.id == vm_id).update(affinity_flag=1)
            if db.host_affinity(affinity_host=host_name):
                # Entry exists already; no-op update kept for parity with
                # the original behaviour.
                db(db.host_affinity.affinity_host == host_name).update(
                    affinity_host=host_name)
            else:
                db.host_affinity.insert(vm_id=vm_id, affinity_host=host_name)
    return host_names
def specify_user_roles(user_id, user_roles):
    """Activate a user and attach the requested roles plus the base USER role."""
    message = None
    try:
        if not user_roles:
            message = "Only user role activated for user"
        else:
            for role in user_roles:
                db.user_membership.insert(user_id=user_id, group_id=role)
            message = "User Activated with specified roles"
        # Clearing the registration key activates the account.
        db(db.user.id == user_id).update(registration_key='')
        # Every activated user additionally joins the plain USER group.
        for row in db(db.user_group.role == USER).select(db.user_group.id):
            db.user_membership.insert(user_id=user_id, group_id=row.id)
    except Exception:
        logger.debug("Ignoring duplicate role entry")
    return message
def get_edit_vm_config_form(vm_id):
    """Build the request form for editing an existing VM's configuration."""
    vm_data = db.vm_data[vm_id]
    req = db.request_queue
    # Pre-fill the request with the VM's current configuration.
    req.parent_id.default = vm_data.id
    req.vm_name.default = vm_data.vm_name
    req.RAM.default = vm_data.RAM
    req.RAM.requires = IS_IN_SET(VM_RAM_SET, zero=None)
    req.vCPU.default = vm_data.vCPU
    req.vCPU.requires = IS_IN_SET(VM_vCPU_SET, zero=None)
    req.HDD.default = vm_data.HDD
    req.public_ip.default = (vm_data.public_ip is not None)
    req.security_domain.default = vm_data.security_domain
    req.request_type.default = VM_TASK_EDIT_CONFIG
    req.status.default = get_request_status()
    req.requester_id.default = auth.user.id
    req.owner_id.default = vm_data.owner_id
    # Offer only security domains visible to the requester's organisation.
    _query = (db.security_domain.visible_to_all == True) | \
        (db.security_domain.org_visibility.contains(
            vm_data.requester_id.organisation_id))
    req.security_domain.requires = IS_IN_DB(db(_query), 'security_domain.id',
                                            '%(name)s', zero=None)
    form_fields = ['vm_name', 'RAM', 'vCPU', 'public_ip',
                   'security_domain', 'purpose']
    return SQLFORM(db.request_queue, fields=form_fields)
def enqueue_vm_request(request_id):
    """Convert an approved request into the matching task-queue entry."""
    req_data = db.request_queue[request_id]
    params = {'request_id': request_id}
    req_type = req_data.request_type
    # Dispatch to the specific task-creation helper per request type.
    if req_type == VM_TASK_CLONE:
        create_clone_task(req_data, params)
    elif req_type == VM_TASK_CREATE:
        create_install_task(req_data, params)
    elif req_type == VM_TASK_EDIT_CONFIG:
        create_edit_config_task(req_data, params)
    elif req_type == VM_TASK_ATTACH_DISK:
        params.update({'disk_size': req_data.attach_disk})
        add_vm_task_to_queue(req_data.parent_id, req_type,
                             params=params,
                             requested_by=req_data.requester_id)
    db(db.request_queue.id == request_id).update(status=REQ_STATUS_IN_QUEUE)
def process_snapshot_vm(snapshot_type, vm_id=None, frequency=None):
    """ Handles snapshot task
        Invoked when scheduler runs task of type 'snapshot_vm'"""
    logger.debug("ENTERING SNAPSHOT VM TASK........Snapshot Type: %s" % snapshot_type)
    try:
        if snapshot_type == SNAPSHOT_SYSTEM:
            # Direct system snapshot of a single VM; frequency carries the
            # original periodic snapshot type.
            params = {'snapshot_type': frequency, 'vm_id': vm_id}
            task[VM_TASK_SNAPSHOT](params)
        else:
            # Periodic run: queue a system-snapshot task for every hosted VM
            # whose snapshot_flag includes this snapshot type (bitmask test).
            vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING,
                                               VM_STATUS_SUSPENDED,
                                               VM_STATUS_SHUTDOWN)).select()
            for vm_data in vms:
                flag = vm_data.snapshot_flag
                if (snapshot_type & flag):
                    logger.debug("snapshot_type" + str(snapshot_type))
                    vm_scheduler.queue_task(TASK_SNAPSHOT,
                                            group_name='snapshot_task',
                                            pvars={'snapshot_type': SNAPSHOT_SYSTEM,
                                                   'vm_id': vm_data.id,
                                                   'frequency': snapshot_type},
                                            start_time=request.now,
                                            timeout=60 * MINUTES)
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING SNAPSHOT VM TASK........")
def host_networking():
    """Collect networking data from all hosts that are currently UP."""
    logger.debug("collecting host networking data")
    # fix: the two conditions were joined with Python `and`, which discards
    # the first operand; DAL queries must be combined with `&`.
    active_host_list = db((db.host.status == HOST_STATUS_UP) &
                          (db.host.host_ip == db.private_ip_pool.id)).select(
        db.private_ip_pool.private_ip)
    active_host_name = db(db.host.status == HOST_STATUS_UP).select(db.host.host_name)
    logger.debug("active_host_list:" + str(active_host_list))
    logger.debug("active_host_name:" + str(active_host_name))
    host_name_list = []
    host_ip_list = []
    # Build parallel ip/name lists; both selects filter on the same status.
    for i in xrange(0, len(active_host_list)):
        host_ip_list.append(active_host_list[i].private_ip)
        host_name_list.append(active_host_name[i].host_name)
    logger.debug(host_ip_list)
    logger.debug(host_name_list)
    collect_data_from_host(host_ip_list, host_name_list)
    logger.debug("collected host networking data")
def page():
    """Return body and title of the page whose label is the first URL arg."""
    label = request.args[0]
    row = db(db.pages.page_label == label).select().first()
    return {'body': row['body'], 'title': row['title']}