def install(config):
    """Substitute the configured MySQL credentials into the freenest-core web tree."""
    commands = OSCommands(MODULE_NAME)
    passwd = config.get("database", "mysql_passwd")
    uname = config.get("database", "mysql_uname")
    web_root = "/usr/share/freenest-core/www"
    # Replace the placeholder tokens in every file under the web root.
    commands.recursive_rpl("MYSQL_ADMINUSER_PASSWD", passwd, web_root)
    commands.recursive_rpl("MYSQL_UNAME", uname, web_root)
def runUrlFlask():
    """Start the web app: Flask's dev server on Windows, embedded Gunicorn elsewhere."""
    if platform.system() == "Windows":
        # Gunicorn is POSIX-only, so fall back to Flask's built-in server.
        app.run(host=get('server', 'serverhost'),
                port=get('server', 'serverport'), debug=True)
    else:
        import gunicorn.app.base

        class StandaloneApplication(gunicorn.app.base.BaseApplication):
            # Minimal in-process Gunicorn wrapper (the "custom application" pattern
            # from the Gunicorn docs).

            def __init__(self, app, options=None):
                self.options = options or {}
                self.application = app
                super(StandaloneApplication, self).__init__()

            def load_config(self):
                # Copy only recognised, non-None settings into Gunicorn's config.
                _config = dict([(key, value) for key, value in iteritems(self.options)
                                if key in self.cfg.settings and value is not None])
                for key, value in iteritems(_config):
                    self.cfg.set(key.lower(), value)

            def load(self):
                return self.application

        _options = {
            'bind': '%s:%s' % (get('server', 'serverhost'), get('server', 'serverport')),
            'workers': 4,
            'accesslog': '-',  # log to stdout
            'access_log_format': '%(h)s %(l)s %(t)s "%(r)s" %(s)s "%(a)s"'
        }
        StandaloneApplication(app, _options).run()
def novnc_access():
    """Redirect the caller to the noVNC console page for the supplied token."""
    token = request.vars['token']
    port = config.get("NOVNC_CONF", "port")
    url_ip = config.get("NOVNC_CONF", "url_ip")
    target = "http://" + str(url_ip) + ":" + str(port) + "/vnc_auto.html?path=?token=" + str(token)
    return redirect(target)
def install(config):
    """Initialise the LDAP directory with the configured credentials.

    Rewrites placeholder tokens in the bundled install script and LDIF file,
    then executes the script.
    """
    commands = OSCommands(MODULE_NAME)
    # (Re)installation of the slapd package itself is currently disabled;
    # the script below assumes slapd and ldap-utils are already present.
    root_pass = config.get("ldap", "root_password")
    admin_pass = config.get("ldap", "freenest_adminuser_password")
    admin_email = config.get("ldap", "freenest_adminuser_email")
    rss_pass = config.get("ldap", "freenest_rssuser_password")
    hashed = crypted_password(root_pass, ENCRYPTION_SCHEME)
    # Fill in the LDAP initiation script.
    script = settings.WORKING_DIR + "/install-default-ldap.sh"
    commands.rpl("LDAP_ROOT_USER_NAME", settings.LDAP_ROOT_USER_NAME, script)
    commands.rpl("LDAP_ADMIN_PASSWORD_HASH", hashed[0], script)
    commands.rpl("LDAP_PATH_TO_TMPDIR", settings.WORKING_DIR, script)
    # Fill in adminuser's password/email and the rssuser password.
    ldif = settings.WORKING_DIR + "/nest_users.ldif"
    commands.rpl("FREENEST_ADMINUSER_PASSWORD", admin_pass, ldif)
    commands.rpl("FREENEST_ADMINUSER_EMAIL", admin_email, ldif)
    commands.rpl("FREENEST_RSSUSER_PASSWORD", rss_pass, ldif)
    # Run the prepared initialisation script.
    commands.cmdlog("cd " + settings.WORKING_DIR + "; sh ./install-default-ldap.sh")
def request_vpn():
    """Create VPN credentials for the current user and fetch the key bundle.

    Returns 1 when the remote script reports failure, 3 on success,
    2 on any local error (None if the script output is unrecognised).
    """
    user_info = get_vpn_user_details()
    logger.debug(type(user_info))
    user_name = user_info['username']
    cmd = "./vpn_client_creation.sh " + str(user_name)
    vpn_ip = config.get("VPN_CONF", "vpn_server_ip")
    vpn_key_path = config.get("VPN_CONF", "vpn_key_path")
    try:
        result = execute_remote_cmd(vpn_ip, 'root', cmd, ret_list=True)
        remote_tar = vpn_key_path + str(user_name) + "_baadalVPN.tar"
        local_tar = os.path.join(
            get_context_path(), 'private/VPN/' + str(user_name) + "_baadalVPN.tar")
        sftp_files(vpn_ip, 'root', remote_tar, local_tar)
        if "false" in str(result):
            return 1
        elif "true" in str(result):
            return 3
    except Exception:
        return 2
def host_power_down(host_data):
    """Power off a host: virtual hosts via virsh, physical via UCS CLI or 'init 0'.

    BUGFIX: the virsh command previously read 'virsh destroy<name>' (missing
    space after 'destroy'). Also closes the UCS SSH session, which previously
    leaked.
    """
    try:
        host_ip = host_data.host_ip.private_ip
        if host_data.host_type == HOST_TYPE_VIRTUAL:
            # NOTE(review): get_host_name is subscripted here; confirm it is a
            # mapping and not a function that should be called as get_host_name(host_ip).
            output = execute_remote_cmd(host_ip, 'root',
                                        'virsh destroy ' + get_host_name[host_ip])
        else:
            setup_type = config.get("GENERAL_CONF", "setup_type")
            if setup_type == "nic":
                ucs_management_ip = config.get("UCS_MANAGER_DETAILS", "ucs_management_ip")
                logger.debug(ucs_management_ip)
                logger.debug(type(ucs_management_ip))
                ucs_user = config.get("UCS_MANAGER_DETAILS", "ucs_user")
                logger.debug(ucs_user)
                logger.debug(type(ucs_user))
                ucs_password = config.get("UCS_MANAGER_DETAILS", "ucs_password")
                # NOTE(review): this logs a credential in clear text — consider removing.
                logger.debug(ucs_password)
                host_ip = str(host_ip)
                server_num = host_ip.split('.')
                # UCS service-profile numbers are offset by 20 from the last IP octet.
                ucs_server_num = str(int(server_num[3]) - 20)
                logger.debug("ucs server number is :" + ucs_server_num)
                ssh = paramiko.SSHClient()
                ssh.load_system_host_keys()
                ssh.connect(ucs_management_ip, username=ucs_user, password=ucs_password)
                try:
                    stdin, stdout, stderr = ssh.exec_command(
                        "scope org / ; scope org IIT-Delhi ; scope service-profile Badal-Host"
                        + str(ucs_server_num) + " ; power down ; commit-buffer")  # @UnusedVariable
                    output = stdout.readlines()
                    # Any output from the UCS CLI indicates the command did not run cleanly.
                    if len(output) != 0:
                        logger.debug("Host not powered up . Command not run properly ")
                finally:
                    # BUGFIX: the SSH connection was never closed (resource leak).
                    ssh.close()
            else:
                output = execute_remote_cmd(host_ip, 'root', 'init 0')
                logger.debug(str(output) + ' ,Host shut down successfully !!!')
    except:
        log_exception()
def create_vnc_url(vm_id):
    """Return the noVNC console URL for *vm_id*, or None if no token was granted."""
    token = grant_novnc_access(vm_id)
    if not token:
        return None
    port = config.get("NOVNC_CONF", "port")
    url_ip = config.get("NOVNC_CONF", "url_ip")
    return ("http://" + str(url_ip) + ":" + str(port)
            + "/vnc_auto.html?path=?token=" + str(token))
def send_email(to_address, email_subject, email_template, context, cc_addresses=None):
    """Format *email_template* with *context* and send it, CC'ing the mail sender.

    BUGFIX: cc_addresses previously defaulted to a shared mutable list that was
    mutated with append(), so CC recipients accumulated across calls. The
    default is now None and the caller's list is copied, never mutated.
    """
    if to_address != None:
        email_template += MAIL_FOOTER
        context['adminEmail'] = config.get("MAIL_CONF", "mail_admin_request")
        email_message = email_template.format(context)
        # Work on a copy so the caller's list (or the default) is never changed.
        cc_list = list(cc_addresses) if cc_addresses is not None else []
        cc_list.append(config.get("MAIL_CONF", "mail_sender"))
        logger.info("Email message is::" + str(email_message))
        push_email(to_address, email_subject, email_message, [], cc_list)
def send_email_to_admin(email_subject, email_message, email_type):
    """Relay a user's bug report / request / complaint to the matching admin mailbox."""
    if email_type == 'report_bug':
        email_address = config.get("MAIL_CONF", "mail_admin_bug_report")
    elif email_type == 'request':
        email_address = config.get("MAIL_CONF", "mail_admin_request")
    elif email_type == 'complaint':
        email_address = config.get("MAIL_CONF", "mail_admin_complaint")
    user_email_address = auth.user.email
    logger.info("MAIL ADMIN: type:" + email_type + ", subject:" + email_subject
                + ", message:" + email_message + ", from:" + user_email_address)
    push_email(email_address, email_subject, email_message, user_email_address)
def fetch_ldap_user(username):
    """Open a connection to the configured LDAP server.

    NOTE(review): despite its name, the visible code never searches for
    *username* and always returns None — confirm whether lookup logic was
    lost. base_dn is read but unused. Uses the deprecated ldap.open()
    (ldap.initialize() is preferred). Python 2 'except E, e' syntax: this
    module is Python 2 only.
    """
    ldap_url = config.get("LDAP_CONF","ldap_url")
    base_dn = config.get("LDAP_CONF","ldap_dn")
    import ldap
    try:
        l = ldap.open(ldap_url)
        l.protocol_version = ldap.VERSION3
    except ldap.LDAPError, e:
        logger.error(e)
    return None
def send_email_to_user_manual(email_subject, email_message, vm_id):
    """Mail every user mapped to *vm_id* with the given subject and body."""
    context = dict(adminEmail=config.get("MAIL_CONF", "mail_admin_request"),
                   homeAddress=config.get("GENERAL_CONF", "home_address"))
    rows = db(db.user_vm_map.vm_id == vm_id).select(db.user_vm_map.user_id)
    vm_users = [row['user_id'] for row in rows]
    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        if user_info[1] != None:
            logger.info("MAIL USER: User Name: " + user_info[0])
            send_email(user_info[1], email_subject, email_message, context)
def fetch_user_role(username):
    """Return the list of roles configured for *username* (always includes USER)."""
    roles = [current.USER]
    # Role membership is stored as quoted uid lists in the config file.
    quoted = "'" + username + "'"
    if quoted in config.get("GENERAL_CONF", "admin_uid"):
        roles.append(current.ADMIN)
    if quoted in config.get("GENERAL_CONF", "orgadmin_uid"):
        roles.append(current.ORGADMIN)
    if quoted in config.get("GENERAL_CONF", "faculty_uid"):
        roles.append(current.FACULTY)
    return roles
def send_email_to_user_manual(email_subject, email_message, vm_id):
    """Send a manual notification mail to all users of the given VM."""
    context = dict(adminEmail=config.get("MAIL_CONF", "mail_admin_request"),
                   homeAddress=config.get("GENERAL_CONF", "home_address"))
    for row in db(db.user_vm_map.vm_id == vm_id).select(db.user_vm_map.user_id):
        details = get_user_details(row['user_id'])
        if details[1] != None:
            logger.info("MAIL USER: User Name: " + details[0])
            send_email(details[1], email_subject, email_message, context)
class IITD_Oauth(OAuthAccount):
    """web2py OAuthAccount subclass for the IIT-Delhi OAuth provider."""

    # Endpoints come from the deployment config, read once at class definition.
    auth_url = config.get("OAUTH_CONF", "auth_url")
    token_url = config.get("OAUTH_CONF", "token_url")

    def __init__(self, g=globals()):
        # NOTE(review): g=globals() is evaluated once at def time — confirm
        # this is the intended web2py environment-capture behaviour.
        OAuthAccount.__init__(self, g, client_id=cl_id,
                              client_secret=cl_secret,
                              auth_url=self.auth_url,
                              token_url=self.token_url,
                              state='xyz')

    def get_user(self):
        """Fetch the resource-owner profile and map it to a web2py user dict.

        Returns None when no access token is available yet.
        """
        token = self.accessToken()
        if not token:
            return None
        uri = config.get("OAUTH_CONF", "resource_url")
        r = requests.get(uri, params={'access_token': token})
        userdata = r.json()
        user_info = {}
        if ' ' in userdata['name']:
            #user_info['first_name'],user_info['middle_name'],user_info['last_name']=userdata['name'].split()
            data = userdata['name'].split()
            if len(data) > 2:
                # Three or more parts: keep the first three (any extras are dropped).
                user_info['first_name'] = data[0]
                user_info['middle_name'] = data[1]
                user_info['last_name'] = data[2]
            else:
                user_info['first_name'] = data[0]
                user_info['last_name'] = data[1]
        else:
            user_info['first_name'] = userdata['name']
            user_info['last_name'] = ' '
        user_info['user_name'] = userdata['user_id']
        user_info['email'] = userdata['email']
        user_info['roles'] = fetch_user_role(user_info['user_name'])
        # If user has super admin rights; it is added to separate organization
        if current.ADMIN in user_info['roles']:
            user_info['organisation'] = 'ADMIN'
        else:
            user_info['organisation'] = 'IITD'
        create_or_update_user(user_info, False)
        return dict(first_name=user_info['first_name'],
                    last_name=user_info['last_name'],
                    email=userdata['email'],
                    username=userdata['user_id'])
def grant_novnc_access(vm_id):
    """Grant (or reuse) a noVNC access token for *vm_id*.

    Returns the token string, or None when the daily limit is exceeded or an
    error occurs. BUGFIX: token was previously unbound on those two paths, so
    the final `return token` raised NameError instead of reporting failure.
    """
    msg = ""
    token = None
    active_vnc = db((db.vnc_access.vm_id == vm_id) & (
        db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()
    if active_vnc > 0:
        # Re-use the still-active token for this VM.
        vm_data = db(db.vnc_access.vm_id == vm_id).select().first()
        token = vm_data.token
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        vnc_count = db((db.vnc_access.vm_id == vm_id) &
                       (db.vnc_access.time_requested >
                        (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY:
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                # Random 10-byte hex token from openssl; strip the trailing newline.
                f = os.popen('openssl rand -hex 10')
                token = f.read().split("\n")[0]
                create_novnc_mapping(vm_id, token)
                vm_data = db(db.vm_data.id == vm_id).select().first()
                host_ip = vm_data.host_id.host_ip.private_ip
                vnc = str(vm_data.vnc_port)
                file_token = str(token) + ":" + " " + str(host_ip) + ":" + str(vnc) + "\n"
                myfile = get_file_append_mode("/home/www-data/token.list")
                myfile.write(file_token)
                # Start websockify on the noVNC server if it is not already running.
                command = "ps -ef | grep websockify|awk '{print $2}'"
                port = config.get("NOVNC_CONF", "port")
                server_ip = config.get("NOVNC_CONF", "server_ip")
                return_value = execute_remote_cmd(server_ip, 'root', command).split()
                if len(return_value) <= 2:
                    command = ("./noVNC/utils/websockify/run --web /root/noVNC "
                               "--target-config /home/www-data/token.list "
                               + str(server_ip) + ":" + str(port) + " > /dev/null 2>&1 &")
                    return_value = execute_remote_cmd(server_ip, 'root', command)
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                logger.debug('Some Error Occurred. Please try later')
                log_exception()
    logger.debug(msg)
    return token
def send_email_to_user(task_type, entity_name, request_time, user_list):
    """Notify each user in *user_list* that *task_type* completed on *entity_name*."""
    time_str = request_time.strftime("%A %d %B %Y %I:%M:%S %p")
    for _user in user_list:
        user_info = get_user_details(_user)
        if user_info[1] == None:
            continue
        context = dict(entityName=entity_name, userName=user_info[0],
                       taskType=task_type, requestTime=time_str)
        if task_type == VM_TASK_CREATE:
            # New VMs additionally need the gateway address in the mail body.
            context['gatewayVM'] = config.get("GENERAL_CONF", "gateway_vm")
            send_email(user_info[1], VM_CREATION_SUBJECT, VM_CREATION_BODY, context)
        elif task_type == Object_Store_TASK_CREATE:
            send_email(user_info[1], OBJECT_CREATION_SUBJECT, OBJECT_CREATION_BODY, context)
        elif task_type in CONTAINER_TASKS:
            subject = TASK_COMPLETE_SUBJECT.format(dict(taskType=task_type))
            send_email(user_info[1], subject, CONT_TASK_COMPLETE_BODY, context)
        else:
            subject = TASK_COMPLETE_SUBJECT.format(dict(taskType=task_type))
            send_email(user_info[1], subject, TASK_COMPLETE_BODY, context)
def get_user(self):
    """Return the authenticated user's mapped profile dict, or None without a token.

    BUGFIX: names that are not exactly three space-separated words previously
    raised ValueError during tuple unpacking; two-part and 3+-part names are
    now handled explicitly (as in the sibling IITD_Oauth implementation).
    Also drops the unused local `hd = userdata['hd']`.
    """
    token = self.accessToken()
    if not token:
        return None
    uri = config.get("OAUTH_CONF", "resource_url")
    r = requests.get(uri, params={'access_token': token})
    userdata = r.json()
    user_info = {}
    if ' ' in userdata['name']:
        parts = userdata['name'].split()
        if len(parts) > 2:
            # Three or more parts: keep the first three (extras are dropped).
            user_info['first_name'] = parts[0]
            user_info['middle_name'] = parts[1]
            user_info['last_name'] = parts[2]
        else:
            user_info['first_name'] = parts[0]
            user_info['last_name'] = parts[1]
    else:
        user_info['first_name'] = userdata['name']
        user_info['last_name'] = ' '
    user_info['user_name'] = userdata['user_id']
    user_info['email'] = userdata['email']
    user_info['roles'] = fetch_user_role(user_info['user_name'])
    # Super admins are grouped in a dedicated organisation.
    if current.ADMIN in user_info['roles']:
        user_info['organisation'] = 'ADMIN'
    else:
        user_info['organisation'] = 'IITD'
    create_or_update_user(user_info, False)
    return dict(first_name=user_info['first_name'],
                last_name=user_info['last_name'],
                email=userdata['email'],
                username=userdata['user_id'])
def process_purge_shutdownvm():
    """Queue deletion of VMs shut down past the warning period; unlock VMs back in use.

    BUGFIX: the row filter used the Python 'and' operator between two DAL
    Query objects, which evaluates to only the right-hand query — the
    `locked == True` condition was silently dropped. The queries are now
    combined with the DAL '&' operator so both conditions apply.
    """
    logger.info("ENTERING PURGE SHUTDOWN VM ........")
    vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")
    try:
        # All locked VMs that have already been sent a delete warning.
        for vm_data in db((db.vm_data.locked == True) &
                          (db.vm_data.delete_warning_date != None)).select(db.vm_data.ALL):
            daysDiff = (get_datetime() - vm_data.delete_warning_date).days
            if daysDiff >= 0:
                # Only the most recent event row decides the VM's current state.
                for vm_details in db(db.vm_event_log.vm_id == vm_data.id).select(
                        db.vm_event_log.ALL, orderby=~db.vm_event_log.id, limitby=(0, 1)):
                    daysDiff = (get_datetime() - vm_details.timestamp).days
                    if vm_details.new_value == "Shutdown" and int(daysDiff) >= int(vmShutDownDays):
                        logger.info("Need to delete the VM ID:" + str(vm_data.id))
                        # Queue the delete so the scheduler picks it up.
                        add_vm_task_to_queue(vm_data.id, VM_TASK_DELETE)
                    else:
                        logger.info("No need to delete the VM ID:" + str(vm_data.id) + " as it is in use now. ")
                        # VM became active again: unlock and clear the warning.
                        db(db.vm_data.id == vm_details.vm_id).update(locked='F', delete_warning_date=None)
            else:
                logger.info("No need to process shutdown VM :" + str(vm_data.id))
    except:
        log_exception()
    finally:
        db.commit()
    logger.debug("EXITING PURGE SHUTDOWN VM ........")
def install(config):
    """Point the log scripts at the configured host and domain, then deploy them."""
    commands = OSCommands(MODULE_NAME)
    hostname = config.get("network", "hostname")
    domain = config.get("network", "domain")
    scripts_dir = settings.WORKING_DIR + "/logscripts"
    # Replace domain name with the configured one
    commands.cmdlog("rpl -q -R " + _RPL_DOMAIN + " " + domain + " " + scripts_dir)
    # Replace the more common hostname with the configured one
    commands.cmdlog("rpl -q -R " + _RPL_HOST + " " + hostname + " " + scripts_dir)
    # Copy the updated files into place
    commands.cmdlog("cp -r " + scripts_dir + " /nestcontrol/logscripts")
def process_sendwarning_unusedvm():
    """Warn users of VMs whose resource usage stayed below thresholds for 20 days."""
    logger.info("Entering send warning to unused VM........")
    try:
        # Daily CPU / network usage thresholds from configuration.
        thresholdcontext = dict(
            CPUThreshold=config.get("GENERAL_CONF", "cpu_threshold_limit"),
            ReadThreshold=config.get("GENERAL_CONF", "nwRead_threshold_limit"),
            WriteThreshold=config.get("GENERAL_CONF", "nwWrite_threshold_limit"))
        logger.info("checking network usage with threshold values as CPUThreshold is:"
                    + str(thresholdcontext['CPUThreshold'])
                    + " WriteThreshold is :" + str(thresholdcontext['WriteThreshold'])
                    + " ReadThreshold is :" + str(thresholdcontext['ReadThreshold']))
        # Candidates: running/suspended VMs at least 20 days old and not yet warned.
        vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED)
                 & (db.vm_data.shutdown_warning_date == None)
                 & (db.vm_data.start_time < (get_datetime() - timedelta(days=20)))).select()
        for vm in vms:
            logger.info("comparing threshold for the vm " + str(vm.vm_identity))
            notify = 0
            if compare_rrd_data_with_threshold(vm.vm_identity, thresholdcontext) == True:
                vm_users = []
                vm_name = ""
                for user in db((db.user_vm_map.vm_id == vm.id)
                               & (db.user_vm_map.vm_id == db.vm_data.id)
                               & (db.vm_data.shutdown_warning_date == None)).select(
                                   db.user_vm_map.user_id, db.vm_data.vm_name):
                    notify = 1
                    vm_users.append(user.user_vm_map.user_id)
                    vm_name = user.vm_data.vm_name
                if notify == 1:
                    vm_shutdown_time = send_email_vm_warning(
                        VM_TASK_WARNING_SHUTDOWN, vm_users, vm_name, '')
                    logger.debug("Mail sent for vm_name:" + str(vm_name)
                                 + "|shutdown time returned from the function:"
                                 + str(vm_shutdown_time))
                    db(db.vm_data.id == vm.id).update(shutdown_warning_date=vm_shutdown_time)
                    db.commit()
                else:
                    logger.debug("Warning Email to use the VM has already been sent to VM_ID:" + str(vm.id))
            else:
                logger.info("VM:" + str(vm.id) + " is in use.. no need to send shutdown warning mail ...")
    except:
        log_exception()
    finally:
        db.commit()
    logger.debug("EXITING send warning to unused VM........")
def send_email_on_registration_denied(user_id):
    """Tell *user_id* that their registration request was denied."""
    user_info = get_user_details(user_id)
    if user_info[1] == None:
        return
    context = dict(userName=user_info[0],
                   supportMail=config.get("MAIL_CONF", "mail_admin_request"))
    send_email(user_info[1], REGISTRATION_DENIED_SUBJECT, REGISTRATION_DENIED_BODY, context)
def grant_novnc_access(vm_id):
    """Grant or reuse a noVNC token for *vm_id*; start websockify when needed.

    BUGFIX: `token` was unbound when the daily request limit was exceeded or
    when the grant path raised, so `return token` crashed with NameError; it
    is now initialised to None so callers get a falsy value instead.
    """
    msg = ""
    token = None
    active_vnc = db((db.vnc_access.vm_id == vm_id) &
                    (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()
    if active_vnc > 0:
        vm_data = db(db.vnc_access.vm_id == vm_id).select().first()
        token = vm_data.token
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        vnc_count = db((db.vnc_access.vm_id == vm_id) &
                       (db.vnc_access.time_requested >
                        (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY:
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                # Generate a random token and register the VM's VNC endpoint.
                f = os.popen('openssl rand -hex 10')
                token = f.read()
                token = token.split("\n")
                token = token[0]
                create_novnc_mapping(vm_id, token)
                vm_data = db(db.vm_data.id == vm_id).select().first()
                host_ip = vm_data.host_id.host_ip.private_ip
                vnc_port = vm_data.vnc_port
                vnc = str(vnc_port)
                file_token = str(token) + ":" + " " + str(host_ip) + ":" + str(vnc) + "\n"
                myfile = get_file_append_mode("/home/www-data/token.list")
                myfile.write(file_token)
                # Launch websockify remotely unless it is already running.
                command = "ps -ef | grep websockify|awk '{print $2}'"
                port = config.get("NOVNC_CONF", "port")
                server_ip = config.get("NOVNC_CONF", "server_ip")
                return_value = execute_remote_cmd(server_ip, 'root', command)
                return_value = return_value.split()
                if len(return_value) <= 2:
                    command = ("./noVNC/utils/websockify/run --web /root/noVNC "
                               "--target-config /home/www-data/token.list "
                               + str(server_ip) + ":" + str(port) + " > /dev/null 2>&1 &")
                    return_value = execute_remote_cmd(server_ip, 'root', command)
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                logger.debug('Some Error Occurred. Please try later')
                log_exception()
    logger.debug(msg)
    return token
def remove_vnc_mapping_from_nat(vm_id):
    """Tear down the NAT port mapping for *vm_id*'s VNC console and mark it inactive."""
    vm_data = current.db.vm_data[vm_id]
    vnc_host_ip = config.get("GENERAL_CONF", "vnc_ip")
    host_ip = vm_data.host_id.host_ip.private_ip
    port = vm_data.vnc_port
    try:
        # Source and destination ports are identical for VNC mappings.
        remove_mapping(vnc_host_ip, host_ip, port, port)
        logger.debug("Updating DB")
        current.db(current.db.vnc_access.vm_id == vm_id).update(status=VNC_ACCESS_STATUS_INACTIVE)
    except:
        log_exception()
def install_container(name, templateid, env, cpushare, memory, portmap=False, setnginx=True):
    """Create and start a Docker container from the template's image profile.

    BUGFIX: both exception handlers used 'except docker.errors', which catches
    a *module* and raises "catching classes that do not inherit from
    BaseException" the moment an exception actually fires; they now catch
    docker.errors.APIError (docker-py's API failure class).
    """
    imageprofile = getImageProfile(templateid)
    nodes = get_node_to_deploy()
    nodeindex = get_node_pack(nodes, memory, 1)
    port = imageprofile['port']
    if port:
        # Images that expose a port always get their ports published.
        portmap = True
    # Pin the container to the chosen Swarm node via a scheduling constraint.
    if not env:
        env = {'constraint:node=': nodes[nodeindex]['Name']}
    else:
        env['constraint:node='] = nodes[nodeindex]['Name']
    env['TERM'] = 'xterm'
    if imageprofile['updatemysql']:
        extrahosts = {'mysql': config.get("DOCKER_CONF", "mysql_machine")}
    else:
        extrahosts = None
    hostconfig = client.create_host_config(
        publish_all_ports=portmap,
        mem_limit=memory,
        cap_drop=imageprofile['permissiondrop'],
        cap_add=imageprofile['permissionadd'],
        links=imageprofile['links'],
        extra_hosts=extrahosts)
    try:
        containerid = client.create_container(name=name,
                                              image=imageprofile['Id'],
                                              command=imageprofile['cmd'],
                                              environment=env,
                                              detach=True,
                                              cpu_shares=cpushare,
                                              host_config=hostconfig)
    except docker.errors.APIError as e:
        print(e)
        return
    # Update the db -- container in created state.....
    try:
        response = client.start(container=containerid['Id'])  # @UnusedVariable
        # Update the db -- container in running state
    except docker.errors.APIError as e:
        print(e)
    if port and setnginx:
        container = Container(containerid)
        container.addipbyconf()
    return containerid
def remove_vnc_mapping_from_nat(vm_id):
    """Remove the VNC NAT forwarding for the VM and flag its access record inactive."""
    record = current.db.vm_data[vm_id]
    nat_ip = config.get("GENERAL_CONF", "vnc_ip")
    backend_ip = record.host_id.host_ip.private_ip
    vnc_port = record.vnc_port
    try:
        remove_mapping(nat_ip, backend_ip, vnc_port, vnc_port)
        logger.debug("Updating DB")
        current.db(current.db.vnc_access.vm_id == vm_id).update(
            status=VNC_ACCESS_STATUS_INACTIVE)
    except:
        log_exception()
def request_vpn():
    """Generate a VPN key bundle for the logged-in user and download it locally.

    Return codes: 1 = remote script reported failure, 3 = success,
    2 = exception while running/copying.
    """
    details = get_vpn_user_details()
    logger.debug(type(details))
    username = details['username']
    command = "./vpn_client_creation.sh " + str(username)
    server_ip = config.get("VPN_CONF", "vpn_server_ip")
    key_path = config.get("VPN_CONF", "vpn_key_path")
    try:
        outcome = execute_remote_cmd(server_ip, 'root', command, ret_list=True)
        remote_file = key_path + str(username) + "_baadalVPN.tar"
        local_file = os.path.join(get_context_path(),
                                  'private/VPN/' + str(username) + "_baadalVPN.tar")
        sftp_files(server_ip, 'root', remote_file, local_file)
        if "false" in str(outcome):
            return 1
        elif "true" in str(outcome):
            return 3
    except Exception:
        return 2
def install_container(name, templateid, mount_hostvolumes, env, cpushare, memory, portmap=True):
    """Create and start a container, optionally bind-mounting a host volume.

    BUGFIX: exception handlers caught the docker.errors *module*, which raises
    TypeError at exception time; they now catch docker.errors.APIError.
    NOTE(review): reconstructed from badly mangled source — the branch that
    builds the host config without binds is taken when the image profile has
    no mount destination; confirm against version control history.
    """
    imageprofile = getImageProfile(templateid)
    nodes = get_node_to_deploy()
    nodeindex = get_node_pack(nodes, memory, 1)
    # Pin the container to the chosen Swarm node via a scheduling constraint.
    if not env:
        env = {'constraint:node=': nodes[nodeindex]['Name']}
    else:
        env['constraint:node='] = nodes[nodeindex]['Name']
    if imageprofile['updatemysql']:
        extrahosts = {'mysql': config.get("DOCKER_CONF", "mysql_machine")}
    else:
        extrahosts = None
    if imageprofile['mountdestdir']:
        # Bind the caller-supplied host path read-write at the image's mount point.
        binds = {mount_hostvolumes: {'bind': imageprofile['mountdestdir'], 'mode': 'rw'}}
        hostconfig = client.create_host_config(binds=binds,
                                               publish_all_ports=portmap,
                                               mem_limit=memory,
                                               links=imageprofile['links'],
                                               cap_drop=imageprofile['permissiondrop'],
                                               cap_add=imageprofile['permissionadd'],
                                               extra_hosts=extrahosts)
        print(hostconfig)
        try:
            containerid = client.create_container(name=name,
                                                  image=imageprofile['Id'],
                                                  command=imageprofile['cmd'],
                                                  volumes=[imageprofile['mountdestdir']],
                                                  environment=env,
                                                  detach=True,
                                                  cpu_shares=cpushare,
                                                  host_config=hostconfig)
            print(containerid)
        except docker.errors.APIError as e:
            print(e)
    else:
        hostconfig = client.create_host_config(publish_all_ports=portmap,
                                               mem_limit=memory,
                                               cap_drop=imageprofile['permissiondrop'],
                                               cap_add=imageprofile['permissionadd'],
                                               links=imageprofile['links'],
                                               extra_hosts=extrahosts)
        try:
            containerid = client.create_container(name=name,
                                                  image=imageprofile['Id'],
                                                  command=imageprofile['cmd'],
                                                  environment=env,
                                                  detach=True,
                                                  cpu_shares=cpushare,
                                                  host_config=hostconfig)
            print(containerid)
        except docker.errors.APIError as e:
            print(e)
    # Update the db -- container in created state.....
    print('db update')
    try:
        response = client.start(container=containerid['Id'])
        print(response)
        # Update the db -- container in running state
    except docker.errors.APIError as e:
        print(e)
    port = imageprofile['port']
    if port:
        container = Container(containerid)
        container.addipbyconf()
    return containerid
def process_shutdown_unusedvm():
    """Shut down VMs whose usage is still below thresholds once the warning period lapses."""
    logger.info("ENTERING SHUTDOWN UNUSED VM ........")
    try:
        thresholdcontext = dict(
            CPUThreshold=config.get("GENERAL_CONF", "cpu_threshold_limit"),
            ReadThreshold=config.get("GENERAL_CONF", "nwRead_threshold_limit"),
            WriteThreshold=config.get("GENERAL_CONF", "nwWrite_threshold_limit"))
        # Every VM that has already received a shutdown warning.
        for vmData in db(db.vm_data.shutdown_warning_date != None).select(db.vm_data.ALL):
            daysDiff = (get_datetime() - vmData.shutdown_warning_date).days
            if daysDiff >= 0:
                # Re-check the RRD usage data against the thresholds.
                retVal = compare_rrd_data_with_threshold(vmData.vm_identity, thresholdcontext)
                logger.info(" DaysDiff are " + str(daysDiff) + " return value is " + str(retVal))
                if retVal == True:
                    logger.info("Need to shutdown the VM ID:" + str(vmData.id))
                    # Queue the destroy task for the scheduler to pick up.
                    add_vm_task_to_queue(vmData.id, VM_TASK_DESTROY)
                else:
                    logger.info("No Need to shutdown the VM ID:" + str(vmData.id) + " as VM is in use now. ")
                    # VM is active again: clear the pending warning.
                    db(db.vm_data.id == vmData.id).update(shutdown_warning_date=None)
            else:
                logger.info("No need to process purge for the VM:" + str(vmData.id))
    except:
        log_exception()
    finally:
        db.commit()
    logger.debug("EXITING SHUTDOWN UNUSED VM ........")
def install(config):
    """Generate a self-signed SSL certificate for the configured host name."""
    hostname = config.get("network", "hostname")
    domain = config.get("network", "domain")
    commands = OSCommands(MODULE_NAME)
    # Generate CA
    commands.cmdlog("openssl genrsa -out /etc/ssl/private/server.key")
    # Build the CSR; empty C/ST/L subject fields were removed intentionally.
    csr_cmd = ("openssl req -new -key /etc/ssl/private/server.key "
               "-out /tmp/server.csr "
               '-subj "/O=FreeNEST/CN=' + hostname + '.' + domain + '"')
    commands.cmdlog(csr_cmd)
    # Self-sign the certificate with the same key.
    sign_cmd = ("openssl x509 -req -in /tmp/server.csr "
                "-signkey /etc/ssl/private/server.key "
                "-out /etc/ssl/certs/server.crt")
    commands.cmdlog(sign_cmd)
    # Remove the temporary CSR.
    if os.path.exists("/tmp/server.csr"):
        os.remove("/tmp/server.csr")
def send_email_to_vm_user(task_type, vm_name, request_time, vm_users):
    """Mail each VM user about completion of *task_type* on *vm_name*."""
    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        if user_info[1] == None:
            continue
        context = dict(vmName=vm_name,
                       userName=user_info[0],
                       taskType=task_type,
                       requestTime=request_time.strftime("%A %d %B %Y %I:%M:%S %p"))
        if task_type == VM_TASK_CREATE:
            # Creation mails additionally carry the gateway VM address.
            context['gatewayVM'] = config.get("GENERAL_CONF", "gateway_vm")
            send_email(user_info[1], VM_CREATION_SUBJECT, VM_CREATION_BODY, context)
        else:
            subject = TASK_COMPLETE_SUBJECT.format(dict(taskType=task_type))
            send_email(user_info[1], subject, TASK_COMPLETE_BODY, context)
def send_email_to_vm_user(task_type, vm_name, request_time, vm_users):
    """Notify the VM's users that the requested task finished."""
    stamp = request_time.strftime("%A %d %B %Y %I:%M:%S %p")
    for uid in vm_users:
        details = get_user_details(uid)
        if details[1] != None:
            context = dict(vmName=vm_name, userName=details[0],
                           taskType=task_type, requestTime=stamp)
            if task_type == VM_TASK_CREATE:
                context.update({'gatewayVM': config.get("GENERAL_CONF", "gateway_vm")})
                send_email(details[1], VM_CREATION_SUBJECT, VM_CREATION_BODY, context)
            else:
                send_email(details[1],
                           TASK_COMPLETE_SUBJECT.format(dict(taskType=task_type)),
                           TASK_COMPLETE_BODY, context)
def process_sendwarning_shutdownvm():
    """Warn users of long-shutdown VMs that deletion is pending, and lock those VMs."""
    logger.info("Entering Process send warning mail to shutdown vm........")
    try:
        # Days a VM may stay shut down before a delete warning goes out.
        vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")
        send_email=0
        for vm_id in db().select(db.vm_event_log.vm_id, distinct=True):
            # Only the most recent event row decides the VM's current state.
            for vm_details in db(db.vm_event_log.vm_id==vm_id['vm_id']).select(db.vm_event_log.ALL,orderby = ~db.vm_event_log.id,limitby=(0,1)):
                daysDiff=(get_datetime()-vm_details.timestamp).days
                vm_shutdown_time=vm_details.timestamp
                logger.info("VM details are VM_ID:" + str(vm_details['vm_id'])+ "|ID:"+str(vm_details['id'])+"|new_values is:"+str(vm_details['new_value'])+"|daysDiff:" + str(daysDiff)+"|vmShutDownDays:"+vmShutDownDays+"|vm_shutdown_time :"+str(vm_shutdown_time))
                if (vm_details.new_value == "Shutdown" and int(daysDiff)>=int(vmShutDownDays)):
                    vm_users = []
                    vm_name = ""
                    # Users of VMs not yet locked or warned about deletion.
                    for user in db((db.user_vm_map.vm_id == vm_details.vm_id) & (db.user_vm_map.vm_id == db.vm_data.id) & (db.vm_data.locked != True) & (db.vm_data.delete_warning_date == None )).select(db.user_vm_map.user_id,db.vm_data.vm_name):
                        send_email=1
                        vm_users.append(user.user_vm_map.user_id)
                        vm_name=user.vm_data.vm_name
                    if (send_email == 1):
                        vm_delete_time=send_email_vm_warning(VM_TASK_WARNING_DELETE,vm_users,vm_name,vm_shutdown_time)
                        logger.debug("Mail sent for vm_id:"+str(vm_details.vm_id)+"|vm_name:"+str(vm_name)+"|delete time:"+ str(vm_delete_time))
                        # Lock the VM and record when deletion was announced.
                        db(db.vm_data.id == vm_details.vm_id).update(locked=True, delete_warning_date=vm_delete_time)
                        send_email=0
                    else:
                        logger.debug("Email has already been sent to VM_ID:"+str(vm_details.vm_id))
                else:
                    logger.info("VM:"+str(vm_details.vm_id)+" is not shutdown for: "+str(vmShutDownDays)+"(configured) days")
    except:
        log_exception()
        pass
    finally:
        db.commit()
    logger.debug("EXITING Send warning to shutdown vm........")
def send_email_to_user(task_type, entity_name, request_time, user_list):
    """Send task-completion notification mail to every user in *user_list*."""
    for _user in user_list:
        user_info = get_user_details(_user)
        if user_info[1] != None:
            context = dict(entityName=entity_name,
                           userName=user_info[0],
                           taskType=task_type,
                           requestTime=request_time.strftime("%A %d %B %Y %I:%M:%S %p"))
            if task_type == VM_TASK_CREATE:
                context.update({'gatewayVM': config.get("GENERAL_CONF", "gateway_vm")})
                send_email(user_info[1], VM_CREATION_SUBJECT, VM_CREATION_BODY, context)
            elif task_type == Object_Store_TASK_CREATE:
                send_email(user_info[1], OBJECT_CREATION_SUBJECT, OBJECT_CREATION_BODY, context)
            else:
                # Container tasks and the generic case share the same subject line.
                subject = TASK_COMPLETE_SUBJECT.format(dict(taskType=task_type))
                body = CONT_TASK_COMPLETE_BODY if task_type in CONTAINER_TASKS else TASK_COMPLETE_BODY
                send_email(user_info[1], subject, body, context)
def create_vnc_mapping_in_nat(vm_id):
    """Create a temporary (30 minute) NAT forwarding for the VM's VNC port."""
    row = current.db.vm_data[vm_id]
    nat_ip = config.get("GENERAL_CONF", "vnc_ip")
    lifetime = 30 * 60  # 30 minutes, in seconds
    backend_ip = row.host_id.host_ip.private_ip
    port = row.vnc_port
    # Insert the record as INACTIVE first; flip to ACTIVE only once the
    # NAT mapping has actually been created.
    access_id = current.db.vnc_access.insert(
        vm_id=vm_id, host_id=row.host_id, vnc_server_ip=nat_ip,
        vnc_source_port=port, vnc_destination_port=port,
        duration=lifetime, status=VNC_ACCESS_STATUS_INACTIVE)
    try:
        create_mapping(nat_ip, backend_ip, port, port, lifetime)
        current.db.vnc_access[access_id] = dict(status=VNC_ACCESS_STATUS_ACTIVE)
    except:
        log_exception()
def create_vnc_mapping_in_nat(vm_id):
    """Open a 30-minute NAT mapping for the VM's VNC console and record it."""
    vm_row = current.db.vm_data[vm_id]
    vnc_server = config.get("GENERAL_CONF", "vnc_ip")
    ttl_seconds = 30 * 60  # mapping lives for 30 minutes
    private_ip = vm_row.host_id.host_ip.private_ip
    console_port = vm_row.vnc_port
    # Record starts INACTIVE and is activated only after create_mapping succeeds.
    record_id = current.db.vnc_access.insert(vm_id=vm_id,
                                             host_id=vm_row.host_id,
                                             vnc_server_ip=vnc_server,
                                             vnc_source_port=console_port,
                                             vnc_destination_port=console_port,
                                             duration=ttl_seconds,
                                             status=VNC_ACCESS_STATUS_INACTIVE)
    try:
        create_mapping(vnc_server, private_ip, console_port, console_port, ttl_seconds)
        current.db.vnc_access[record_id] = dict(status=VNC_ACCESS_STATUS_ACTIVE)
    except:
        log_exception()
def process_vmdaily_checks():
    """ Function will check for the shutdown VM's and sends email to the user"""
    logger.info("Entering VM's Daily Checks........")
    try:
        # Days a VM may stay shut down before a delete warning is sent.
        vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")
        send_email=0
        for vm_id in db().select(db.vm_event_log.vm_id, distinct=True):
            # Only the most recent event row decides the VM's current state.
            for vm_details in db(db.vm_event_log.vm_id==vm_id['vm_id']).select(db.vm_event_log.ALL,orderby = ~db.vm_event_log.id,limitby=(0,1)):
                daysDiff=(get_datetime()-vm_details.timestamp).days
                vm_shutdown_time=vm_details.timestamp
                logger.info("VM details are VM_ID:" + str(vm_details['vm_id'])+ "|ID:"+str(vm_details['id'])+"|new_values is:"+str(vm_details['new_value'])+"|daysDiff:" + str(daysDiff)+"|vmShutDownDays:"+vmShutDownDays+"|vm_shutdown_time :"+str(vm_shutdown_time))
                if (vm_details.new_value == "Shutdown" and int(daysDiff)>=int(vmShutDownDays)):
                    vm_users = []
                    vm_name = ""
                    # Users of VMs not yet locked ('T') or warned about deletion.
                    # NOTE(review): send_email is never reset to 0 in this loop
                    # (unlike process_sendwarning_shutdownvm) — confirm intended.
                    for user in db((db.user_vm_map.vm_id == vm_details.vm_id) & (db.user_vm_map.vm_id == db.vm_data.id) & (db.vm_data.locked !='T') & (db.vm_data.delete_warning_date == None )).select(db.user_vm_map.user_id,db.vm_data.vm_name):
                        send_email=1
                        vm_users.append(user.user_vm_map.user_id)
                        vm_name=user.vm_data.vm_name
                    if (send_email == 1):
                        vm_delete_time = send_email_delete_vm_warning(vm_users,vm_name,vm_shutdown_time)
                        logger.debug("Mail sent for vm_name:"+str(vm_name)+"|delete time returned from the function:"+ str(vm_delete_time))
                        # Lock the VM and record when deletion was announced.
                        db(db.vm_data.id == vm_details.vm_id).update(locked=True, delete_warning_date=vm_delete_time)
                    else:
                        logger.debug("Email has already been sent to VM_ID:"+str(vm_details.vm_id))
                else:
                    logger.info("VM:"+str(vm_details.vm_id)+" is not shutdown ..")
    except:
        log_exception()
        pass
    finally:
        db.commit()
    logger.debug("EXITING VM DAILY CHECKS........")
def respawn_dangling_vms(host_id):
    """Re-create every VM of host *host_id* on a new host and restore state.

    For each VM recorded on the host: back up its disk image (with an
    '_old' suffix), pick a new host with enough RAM/vCPU, launch the VM
    there, then revert it to its most recent snapshot.
    """
    vms = current.db(current.db.vm_data.host_id == host_id).select(current.db.vm_data.ALL)
    vm_image_location = get_constant('vmfiles_path') + get_constant('vms') + '/%s/%s.qcow2'
    for vm_data in vms:
        logger.debug('Re-spawning VM ' + vm_data.vm_identity)
        # Create a copy of existing image and rename it with '_old' suffix.
        # NetApp NFS datastores need ndmpcopy; plain cp is used otherwise.
        storage_type = config.get("GENERAL_CONF", "storage_type")
        copy_command = 'ndmpcopy ' if storage_type == current.STORAGE_NETAPP_NFS else 'cp '
        ds_image_location = vm_data.datastore_id.path + get_constant('vms') + '/%s/%s.qcow2'
        command_to_execute = copy_command + ds_image_location % (vm_data.vm_identity, vm_data.vm_identity) + \
            ' ' + ds_image_location % (vm_data.vm_identity, vm_data.vm_identity + '_old')
        # Backup runs on the datastore machine, not locally.
        execute_remote_cmd(vm_data.datastore_id.ds_ip, vm_data.datastore_id.username,
                           command_to_execute, vm_data.datastore_id.password)
        logger.debug('Backup copy of the VM image cretaed successfully.')
        # Collect the properties required to launch the VM on its new host.
        vm_properties = {}
        vm_properties['host'] = find_new_host(vm_data.RAM, vm_data.vCPU)
        vm_properties['ram'] = vm_data.RAM
        vm_properties['vcpus'] = vm_data.vCPU
        vm_properties['mac_addr'] = vm_data.private_ip.mac_addr
        vm_properties['vnc_port'] = vm_data.vnc_port
        vm_properties['template'] = current.db.template[vm_data.template_id]
        vm_properties['vlan_name'] = current.db(current.db.private_ip_pool.private_ip == vm_data.private_ip).select()[0].vlan.name
        # Re-spawn the VM on new host
        launch_vm_on_host(vm_data, vm_image_location % (vm_data.vm_identity, vm_data.vm_identity), vm_properties)
        vm_data.update_record(host_id=vm_properties['host'])
        # Find the most recent snapshot of the given VM; revert to the snapshot
        recent_snapshot = current.db(current.db.snapshot.vm_id == vm_data.id).select(orderby=~current.db.snapshot.timestamp)[0]
        logger.debug('Reverting VM %s to snapshot %s' % (vm_data.vm_identity, recent_snapshot.snapshot_name))
        revert(dict(vm_id=vm_data.id, snapshot_id=recent_snapshot.id))
def install_container(name, templateid, env, cpushare, memory, portmap=False, setnginx=True, restart_policy='no'):
    """Create and start a docker container from the image template *templateid*.

    Chooses a deployment node by memory fit, pins the container to it via a
    swarm 'constraint:node=' environment entry, applies the image profile's
    capability/ulimit settings, starts the container, and optionally
    registers its IP with nginx.

    Returns the dict from client.create_container (contains key 'Id'),
    or None when creation fails.
    """
    imageprofile = getImageProfile(templateid)
    nodes = get_node_to_deploy()
    nodeindex = get_node_pack(nodes, memory, 1)
    port = imageprofile['port']
    if port:
        # The image exposes a port -> publish ports regardless of the caller's flag.
        portmap = True
    if not env:
        env = {'constraint:node=': nodes[nodeindex]['Name']}
    else:
        env['constraint:node='] = nodes[nodeindex]['Name']
    env['TERM'] = 'xterm'
    if imageprofile['updatemysql']:
        # Containers needing MySQL get an /etc/hosts alias to the shared server.
        extrahosts = {'mysql': config.get("DOCKER_CONF", "mysql_machine_ip")}
    else:
        extrahosts = None
    ulimits = []
    import docker.utils
    ulimits.append(docker.utils.Ulimit(Name='NPROC', Soft=500, Hard=1000))
    ulimits.append(docker.utils.Ulimit(Name='NOFILE', Soft=4000, Hard=8000))
    hostconfig = client.create_host_config(publish_all_ports=portmap,
                                           mem_limit=memory,
                                           cap_drop=imageprofile['permissiondrop'],
                                           cap_add=imageprofile['permissionadd'],
                                           links=imageprofile['links'],
                                           extra_hosts=extrahosts,
                                           restart_policy={'Name': restart_policy, 'MaximumRetryCount': 5},
                                           ulimits=ulimits)
    try:
        containerid = client.create_container(name=name, image=imageprofile['Id'],
                                              command=imageprofile['cmd'],
                                              environment=env, detach=True,
                                              cpu_shares=cpushare,
                                              host_config=hostconfig)
    except docker.errors.APIError as e:
        # BUG FIX: the original 'except docker.errors' named a *module*, which
        # raises "TypeError: catching classes that do not inherit from
        # BaseException" the moment an exception occurs. docker-py's base
        # server-error class is docker.errors.APIError.
        print(e)
        return
    # Update the db -- container in created state.....
    try:
        response = client.start(container=containerid['Id'])  # @UnusedVariable
        # Update the db -- container in running state
    except Exception as e:
        logger.debug(e)
    if port and setnginx:
        container = Container(containerid)
        container.addipbyconf()
    return containerid
def get_user(self):
    """Fetch the OAuth user profile and sync it into the local user table.

    Returns None when no access token is available; otherwise a dict with
    first_name, last_name, email and username taken from the OAuth
    resource endpoint.
    """
    access_token = self.accessToken()
    if not access_token:
        return None

    resource_url = config.get("OAUTH_CONF", "resource_url")
    response = requests.get(resource_url, params={'access_token': access_token})
    profile = response.json()

    info = {}
    full_name = profile['name']
    if ' ' not in full_name:
        # Single-token name: keep a placeholder last name.
        info['first_name'] = full_name
        info['last_name'] = ' '
    else:
        parts = full_name.split()
        if len(parts) > 2:
            # Only the first three tokens are kept (first/middle/last).
            info['first_name'] = parts[0]
            info['middle_name'] = parts[1]
            info['last_name'] = parts[2]
        else:
            info['first_name'] = parts[0]
            info['last_name'] = parts[1]

    info['user_name'] = profile['user_id']
    info['email'] = profile['email']
    info['roles'] = fetch_user_role(info['user_name'])
    # Super admins are placed in their own organisation bucket.
    info['organisation'] = 'ADMIN' if current.ADMIN in info['roles'] else 'IITD'
    create_or_update_user(info, False)

    return dict(first_name=info['first_name'],
                last_name=info['last_name'],
                email=profile['email'],
                username=profile['user_id'])
import mysql.connector
from helper import config

# Connection parameters read once from the shared config file.
# NOTE(review): config.get returns strings, so 'port' is passed as a string;
# mysql.connector accepts that, but an int would be cleaner -- confirm callers.
con = {
    'host': config.get('database', 'host'),
    'user': config.get('database', 'user'),
    'passwd': config.get('database', 'password'),
    'db': config.get('database', 'db'),
    'port': config.get('database', 'port')
}


class db:
    """Minimal MySQL helper that opens one connection per statement.

    The public method name 'excute' (sic) is kept for backward
    compatibility with existing callers.
    """
    config = {}  # default connection settings; replaced per instance

    def __init__(self, config):
        self.config = config

    def excute(self, sql, data):
        """Run *sql* with parameter tuple *data*; always close the connection.

        Errors are reported to stdout rather than raised (the original
        best-effort behaviour is preserved).
        """
        cnn = None
        cursor = None
        try:
            cnn = mysql.connector.connect(**self.config)
            cursor = cnn.cursor()
            cursor.execute(sql, data)
        except mysql.connector.Error as e:
            print('mysql fails!{}'.format(e))
        finally:
            # BUG FIX: if connect() itself failed, 'cursor' and 'cnn' were
            # unbound and this clean-up raised NameError, masking the real
            # error; guard both handles before closing.
            if cursor is not None:
                cursor.close()
            if cnn is not None:
                cnn.close()
def _get_nat_details():
    """Return the NAT gateway settings as a (type, ip, user) tuple."""
    section = "GENERAL_CONF"
    return (config.get(section, "nat_type"),
            config.get(section, "nat_ip"),
            config.get(section, "nat_user"))
def send_email_on_registration_denied(user_id):
    """Mail a registration-denied notice to the user, if an address is known."""
    details = get_user_details(user_id)
    if details[1] == None:
        # No email address on record; nothing to send.
        return
    ctx = dict(userName=details[0],
               supportMail=config.get("MAIL_CONF", "mail_admin_request"))
    send_email(details[1], REGISTRATION_DENIED_SUBJECT, REGISTRATION_DENIED_BODY, ctx)
################################################################################### # Added to enable code completion in IDE's. if 0: from gluon import * # @UnusedWildImport from applications.baadal.models import * # @UnusedWildImport ################################################################################### from simplejson import loads, dumps from ast import literal_eval from helper import config,get_datetime, IS_MAC_ADDRESS from auth_user import login_callback,login_ldap_callback, AUTH_TYPE_LDAP from datetime import timedelta from host_helper import HOST_TYPE_PHYSICAL #### Connection Pooling of Db is also possible db_type = config.get("GENERAL_CONF","database_type") conn_str = config.get(db_type.upper() + "_CONF", db_type + "_conn") #db = DAL(conn_str,fake_migrate_all=True) db = DAL(conn_str) db.define_table('constants', Field('name', 'string', length = 255, notnull = True, unique = True), Field('value', 'string', length = 255, notnull = True)) db.define_table('organisation', Field('name', 'string', length = 255, notnull = True, unique = True), Field('details', 'string', length = 255), Field('public_ip', 'string',length = 15), Field('admin_mailid', 'string', length = 50), format = '%(details)s')
In order to manage all the configured IPs, a directory 'dhcp.d' is created within /etc/dhcp. For every new dhcp entry, a file with a unique name is created in this directory with the MAC address and fixed IP address configuration. The DHCP configuration file dhcpd.conf is constructed using the following command cat /etc/dhcp/dhcp.d/*.conf > /etc/dhcp/dhcpd.conf For removing an entry, the corresponding file is deleted from the directory, and dhcpd.conf is re-constructed. DHCP is restarted after each modification.
################################################################################### # Added to enable code completion in IDE's. if 0: from gluon import * # @UnusedWildImport from gluon import auth, request, session from applications.baadal.models import * # @UnusedWildImport from db import oauth_login import gluon global auth; auth = gluon.tools.Auth() ################################################################################### from auth_user import AUTH_TYPE_OAUTH from gluon import current # @Reimport from helper import config, get_constant from maintenance import BAADAL_STATUS_UP current.auth_type = config.get("AUTH_CONF","auth_type") def user(): """ exposes: http://..../[app]/default/user/login http://..../[app]/default/user/logout http://..../[app]/default/user/register http://..../[app]/default/user/profile http://..../[app]/default/user/retrieve_password http://..../[app]/default/user/change_password use @auth.requires_login() @auth.requires_membership('group name') to decorate functions that need access control """
def is_auth_type_db():
    """Return True when the configured auth backend is the local database."""
    configured = config.get("AUTH_CONF", "auth_type")
    return configured == AUTH_TYPE_DB
# NOTE(review): this fragment is the interior of an LDAP user-lookup helper;
# the enclosing 'def' and the 'try:' matching the 'except' below lie outside
# this chunk, so the exact nesting of the lines after the attribute loop is
# reconstructed -- confirm against the full file. Python 2 syntax
# ('except ldap.LDAPError, e').
if result_type == ldap.RES_SEARCH_ENTRY:
    for name, attrs in result_data:
        for k, vals in attrs.items():
            if k == 'cn':
                # 'cn' carries the full display name; split into first/last.
                name_lst = vals[0].split(' ')
                user_info['first_name'] = name_lst[0]
                if len(name_lst) == 2:
                    user_info['last_name'] = name_lst[1]
                elif len(name_lst) > 2:
                    # More than two tokens: everything after the first
                    # space becomes the last name.
                    user_info['last_name'] = vals[0][vals[0].index(' '):].lstrip()
            # if k == 'altEmail':
            #     if vals[0] != 'none':
            #         user_info['email'] = vals[0]
    #TODO: find role and organisation from ldap and set in db accordingly (current iitd ldap does not support this feature entirely)
    # Email is derived from the username plus the configured mail domain.
    user_info['email'] = username + config.get("MAIL_CONF", "email_domain")
    user_info['roles'] = fetch_user_role(username)
    # If user has super admin rights; it is added to separate organization
    if current.ADMIN in user_info['roles']:
        user_info['organisation'] = 'ADMIN'
    else:
        user_info['organisation'] = 'IITD'
except ldap.LDAPError, e:
    logger.error(e)
logger.info(user_info)
# A populated first_name is treated as proof the lookup succeeded.
if 'first_name' in user_info:
    return user_info
else:
    return None
def getTopicFromJason(s):
    """Decode a topic JSON string into a Topic object."""
    d = demjson.decode(s)
    return Topic(d['id'], d['name'], d['parent_id'])


def sleep():
    """Sleep a random 0.5-3 s to throttle crawling."""
    t = random.uniform(0.5, 3)
    time.sleep(t)


def visic(topic):
    """Visit a single topic: throttle, then persist it."""
    sleep()
    db.saveTopic(topic)


cache = redis.Redis(host=config.get("redis", "host"),
                    port=config.get("redis", "port"),
                    db=0,
                    password=config.get("redis", "password"))
db = TopicDB(TopicDB.config)

# BUG FIX: this rpop originally ran at the very top of the module, before
# 'cache' was created, raising NameError at import time; moved below the
# Redis connection. NOTE(review): the key 'topic_queque' looks like a typo
# for 'topic_queue' used below -- confirm before renaming, since popping a
# misspelled key merely returns None.
topic = cache.rpop('topic_queque')

rootTopicJson = cache.rpop('topic_queue')
if rootTopicJson is None:
    # Queue empty: start from the hard-coded root topic.
    rtId = '19776749'  # root
    rootTopic = db.queryTopicById(rtId)
else:
    rootTopic = getTopicFromJason(rootTopicJson)
wideTrasvel(rootTopic, db, cache)
# -*- coding: utf-8 -*- ################################################################################### from helper import config, execute_remote_cmd dhcp_ip = config.get("GENERAL_CONF","dhcp_ip") #Creates bulk entry into DHCP # Gets list of tuple containing (host_name, mac_addr, ip_addr) def create_dhcp_bulk_entry(dhcp_info_list): if len(dhcp_info_list) == 0: return entry_cmd = "" for dhcp_info in dhcp_info_list: host_name = dhcp_info[0] if dhcp_info[0] != None else ('IP_' + dhcp_info[2].replace(".", '_')) dhcp_cmd = ''' file_name="/etc/dhcp/dhcp.d/1_%s.conf" if [ -e "$file_name" ] then echo $file_name else echo "host %s {\n\thardware ethernet %s;\n\tfixed-address %s;\n}\n" > $file_name fi ''' %(host_name, host_name, dhcp_info[1], dhcp_info[2]) entry_cmd += dhcp_cmd restart_cmd = ''' cat /etc/dhcp/dhcp.d/*.conf > /etc/dhcp/dhcpd.conf /etc/init.d/isc-dhcp-server restart '''
# Added to enable code completion in IDE's. if 0: from gluon import * # @UnusedWildImport from gluon import auth, request, session from applications.baadal.models import * # @UnusedWildImport from db import oauth_login import gluon global auth auth = gluon.tools.Auth() ################################################################################### from auth_user import AUTH_TYPE_OAUTH from gluon import current # @Reimport from helper import config, get_constant from maintenance import BAADAL_STATUS_UP current.auth_type = config.get("AUTH_CONF", "auth_type") def user(): """ exposes: http://..../[app]/default/user/login http://..../[app]/default/user/logout http://..../[app]/default/user/register http://..../[app]/default/user/profile http://..../[app]/default/user/retrieve_password http://..../[app]/default/user/change_password use @auth.requires_login() @auth.requires_membership('group name') to decorate functions that need access control """