def ipapath(self):
    """Return the local path of the iOS workhub IPA, downloading it first
    if it is not present on disk."""
    logger.debug('MyWorkHub.ipapath')
    if not os.path.exists(self._path):
        logger.debug('MyWorkHub.prep: workhub has not been downloaded yet')
        if not connection.download_workhub('ios', self._path):
            raise Exception('Failed to download workhub')
    return self._path
def set_storage_mode(self, mode):
    """Select the requested storage-mode radio button in the Test App.

    Unknown modes are a no-op (the current mode is kept).
    """
    instances = {'appdata': 0, 'xstorage': 1, 'fullpath': 2}
    if mode not in instances:
        logger.debug('TAoneApp.set_storage_mode: storage mode kept unchanged.')
        return
    button_path = '/RadioButton[@package=%s][@instance=%s]' % (self._packbund, instances[mode])
    if not UiObj(button_path).click():
        raise Exception('Storage operation on Test App: Cannot select mode %s' % mode)
def clear_all_timedout_vnc_mappings():
    """Delete all expired, still-active VNC mappings.

    For each expired mapping the token is scrubbed from the noVNC token
    list file and the vnc_access row is deleted.
    """
    # Get all active VNC mappings from DB
    current.db("FLUSH QUERY CACHE")
    vnc_mappings = current.db(
        (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
        (current.db.vnc_access.expiry_time < get_datetime())).select()
    # BUG FIX: use short-circuit 'and' instead of bitwise '&' on booleans.
    if vnc_mappings is not None and len(vnc_mappings) != 0:
        for mapping in vnc_mappings:
            logger.debug('Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s' % (mapping.vm_id, mapping.host_id, mapping.token, mapping.vnc_source_port, mapping.vnc_destination_port))
            # BUG FIX: use context managers so the token list is closed
            # even if an error occurs mid-rewrite.
            with open("/home/www-data/token.list", "r") as f:
                lines = f.readlines()
            token = mapping.token
            logger.debug("token is : " + str(token))
            logger.debug("token type is : " + str(type(token)))
            # Rewrite the token list, dropping every line with this token.
            with open("/home/www-data/token.list", "w") as f:
                for line in lines:
                    if token not in line:
                        logger.debug("lines are : " + str(line))
                        f.write(line)
            current.db(current.db.vnc_access.id == mapping.id).delete()
            current.db.commit()
        logger.debug("Done clearing novnc mappings")
    else:
        # BUG FIX: an empty result set is normal; the original raised a
        # copy-pasted 'NAT type is not supported' exception here.
        logger.debug("No timed out VNC mappings to clear")
def paste_text(self, clipboard='workspace', verify=None, block=False):
    """Paste clipboard contents into the Work Web address bar.

    clipboard: which clipboard to paste from (passed to paste_from_clipboard).
    verify: when given, the address-bar text is compared against it afterwards.
    block: inverts the verification -- when True the paste is expected to
           have been BLOCKED, so matching text is an error.
    Raises Exception on any step failure; returns self for chaining.
    """
    # Navigate to the bookmarks page if we are not already there.
    if not self.is_bookmarks_page():
        if not self.move_to_bookmark():
            raise Exception('Failed to paste text on Work Web: Cannot move to Bookmarks')
    if not self.switch_address_bar_mode('editable'):
        raise Exception('Failed to paste text on Work Web: Cannot switch address bar mode')
    edit = self.address_bar_edit()
    # Seed the field with dummy text so there is something to select.
    edit.set_text('DUMMYTEXT', hint=ui.WorkWeb.get('Search or type a URL'))
    if not edit.select_text():
        raise Exception('Failed to paste text on Work Web: Cannot select text')
    if not click_clipboard_command(ui.Android.get('Paste')):
        raise Exception('Failed to paste text on Work Web: Cannot choose Paste on Clipboard commands')
    self.paste_from_clipboard(clipboard)
    if verify:
        current = edit.get_text()
        logger.debug('WorkWeb.paste_text: AddressBar.getText()=%s' % current)
        logger.debug('WorkWeb.paste_text: verify=%s' % verify)
        if current == verify:
            # Text landed in the field: a failure only when paste should
            # have been blocked.
            if block:
                raise Exception('Failed to paste text on Work Web: Text was pasted unexpectedly --- should NOT be pasted')
        else:
            if not block:
                raise Exception('Failed to paste text on Work Web: Text was NOT pasted successfully')
    # Restore the address bar and sanity-check the app state.
    self.switch_address_bar_mode('static')
    if self.is_android_error():
        raise Exception('Failed to paste text on Work Web: Android error')
    if not self.is_app_foreground():
        raise Exception('Failed to paste text on Work Web: Eventually app is not on screen')
    return self
def check_response_head_default(res):
    """Return True when the curl-style response head contains an HTTP/1.1
    2xx OK or 100 Continue status line, otherwise False."""
    # BUG FIX: raw string -- '\.' and '\d' are invalid escapes in a plain
    # string literal (DeprecationWarning, error in newer Pythons).
    match = re.search(r'< HTTP/1\.1 +(2\d+ +OK|100 +Continue).*', res)
    if not match:
        logger.debug('check_response_head_default: HTTP response is NOT OK/Continue')
        return False
    logger.debug('check_response_head_default: HTTP response: %s' % match.group(0))
    return True
def signin(self, username=None, password=None):
    """Sign in to Work Hub, falling back to the stored credentials.

    username/password: optional overrides; self._username/self._password
    are used when omitted.  Handles the optional Device Ownership dialog
    by selecting 'No'.  Raises Exception on any failure; returns self.

    NOTE(review): the password is logged in clear text below -- confirm
    this is acceptable for this test framework.
    """
    logger.debug('WorkHubCore.signin: username=%s, password=%s' % (username, password))
    if not username:
        username = self._username
    if not password:
        password = self._password
    if self.is_signin_prompted():
        # EditText instance 0 is the user name, instance 1 the password.
        if not UiObj('/EditText[@instance=0]').set_text(username.decode(), hint=ui.WorkHub.get('User Name')):
            raise Exception('Failed to sign in to Work Hub: Username input')
        if not UiObj('/EditText[@instance=1]').set_text(password.decode(), hint=ui.WorkHub.get('Password')):
            raise Exception('Failed to sign in to Work Hub: Password input')
        if not UiObj('/Button[@text=%s]' % ui.WorkHub.get('Sign In')).click():
            raise Exception('Failed to sign in to Work Hub: Sign-in click')
        if not self.wait_for_complete():
            raise Exception('Failed to sign in to Work Hub: Timeout after entering credential')
    # Optional Device Ownership dialog: answer 'No' and continue.
    if UiObj('/TextView[@package=%s][@text=%s]' % (self._packbund, ui.WorkHub.get('Device Ownership'))).exists():
        if not UiObj('/RadioButton[@text=%s]' % ui.WorkHub.get('No')).click():
            raise Exception('Failed to sign in to Work Hub: Device Ownership option selection')
        if not UiObj('/Button[@text=%s]' % ui.WorkHub.get('Continue')).click():
            raise Exception('Failed to sign in to Work Hub: Device Ownership continue')
        if not self.wait_for_complete():
            raise Exception('Failed to sign in to Work Hub: Timeout after choosing device ownership')
    # Explicit error banner shown by the hub on a failed sign-in.
    errmsg = UiObj('/TextView[@package=%s][@text="%s"]' % (self._packbund, ui.WorkHub.get('An error occurred during sign in')))
    if errmsg.exists():
        raise Exception('Failed to sign in to Work Hub: Sign-in error')
    if self.is_signin_prompted():
        raise Exception('Failed to sign in to Work Hub: Unknown error, sign-in is still being prompted')
    if not self.wait_for_complete():
        raise Exception('Failed to sign in to Work Hub: Timeout after signing')
    if self.is_android_error():
        raise Exception('Failed to sign in to Work Hub: Android error')
    return self
def match(template_name, image_path):
    """Match a named screen template against the given image file.

    Returns the template match result, or ((None, None), (None, None))
    when the template name is unknown.
    """
    logger.debug('screenmatch: template_name=%s, image_path=%s' % (template_name, image_path))
    if template_name not in _templates:
        logger.debug('screenmatch: invalid template name')
        return (None, None), (None, None)
    entry = _templates[template_name]
    template = ImageTemplate(os.path.join(settings.local.ss_folder, entry['filename']))
    return template.match(image_path, entry['threshold'])
def tasklist_req(self, data):
    """Send the full task listing back to the client as JSON: waiting
    tasks, converting tasks with progress, and finished history entries.

    NOTE(review): the header comment says "send {'id','src'} dict back"
    and the commented-out lambda mapped each entry to its 'src' only,
    but f() below copies the entries verbatim -- confirm which is intended.
    """
    # send {'id','src'} dict back
    # f = lambda dic: map(lambda key:(key,dic[key].get('src','N/A')), dic.keys())
    def f(dic):
        # Shallow copy of the given dict (see NOTE above about 'src').
        ret = {}
        for k in dic.keys():
            ret[k] = dic[k]
        return ret
    logger.debug('in tasklist_req: data is %s' % str(data))
    ret = {'wait_list': {}, 'convert_list': [], 'history_list': []}
    ret['wait_list'].update(f(request_list))
    #t = lambda handle: "%ds"%(self.getengine(data).getprogress(handle))
    #map(ret['convert_list'].append,
    #    [[key, task_list[key].get('src'), t(task_list[key].handle)] for key in task_list.keys()]
    #    )
    def t(key):
        # Current progress of the running conversion for this task key.
        d = task_list.get(key)
        return self.getengine(d).getprogress(d.handle)
    for key in task_list.keys():
        src = task_list[key].get('src')
        ret['convert_list'].append([key, src, t(key)])
    for key in history_list.keys():
        src = history_list[key].get('src')
        # Default return value 1 means "failed" when 'ret' is missing.
        rval = history_list[key].get('ret', 1)
        ret['history_list'].append([key, src, rval])
    self.request.send(jsontool.encode(ret))
def progress_req(self, data):
    """Report the state of the task identified by data['id']: not started,
    in-progress percentage, finished, or failed.

    Sends the encoded answer on self.request; sends REQ_INVALID_ID when
    the id is missing or unknown.
    """
    logger.debug('progress req:%s' % str(data))
    logger.info("progress req:%s" % data)
    # 'in' instead of the deprecated dict.has_key (works on Py2 and Py3).
    if 'id' not in data:
        self.request.send(jsontool.encode(REQ_INVALID_ID))
        return
    # Renamed from 'id' to avoid shadowing the builtin.
    task_id = data.get('id')
    if task_id in request_list:
        self.request.send(jsontool.encode(TASK_NOT_START))
        return
    elif task_id in task_list:
        task = task_list.get(task_id)
        try:
            p = self.getengine(data).getprogress(task.handle)
        except AttributeError:
            self.request.send(jsontool.encode(TASK_NOT_START))
            # BUG FIX: without this return, execution fell through to the
            # TASK_PROGRESS send below with 'p' unbound -> NameError.
            return
        self.request.send(jsontool.encode(TASK_PROGRESS % p))
        return
    elif task_id in history_list:
        # In history_list the entry has been converted to a plain dict.
        # Renamed local (the original rebound the 'data' parameter).
        hist = history_list.get(task_id)
        assert 'ret' in hist
        if hist['ret'] == 0:
            self.request.send(jsontool.encode(TASK_FINISHED))
        else:
            self.request.send(jsontool.encode(TASK_FAILED % hist['ret']))
        return
    else:
        self.request.send(jsontool.encode(REQ_INVALID_ID))
def remove_mapping(source_ip, destination_ip, source_port=-1, destination_port=-1):
    """Remove a NAT mapping: a public->private IP mapping when both ports
    are -1, otherwise a VNC port-forwarding rule."""
    nat_type, nat_ip, nat_user = get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        # source_port and destination_port are -1 when function is called for removing public IP - private IP mapping
        if source_port == -1 and destination_port == -1:
            logger.debug("Removing mapping for public IP: %s and private IP: %s" % (source_ip, destination_ip))
            command = (get_interfaces_command('Delete', source_ip) +
                       get_ip_tables_command('Delete', source_ip, destination_ip))
        else:
            logger.debug("Removing VNC mapping from NAT for public IP %s host IP %s public VNC port %s private VNC port %s" % (source_ip, destination_ip, source_port, destination_port))
            command = get_ip_tables_command('Delete', source_ip, destination_ip, source_port, destination_port)
        # Run every command over a single SSH session on the NAT box.
        execute_remote_bulk_cmd(nat_ip, nat_user, command)
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # This function is to be implemented
        return
    else:
        raise Exception("NAT type is not supported")
def clear_all_timedout_vnc_mappings():
    """Delete expired active VNC mappings and scrub their tokens from the
    noVNC token list file."""
    # Get all active VNC mappings from DB
    current.db("FLUSH QUERY CACHE")
    expired = ((current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
               (current.db.vnc_access.expiry_time < get_datetime()))
    vnc_mappings = current.db(expired).select()
    # BUG FIX: short-circuit 'and' instead of bitwise '&' on booleans.
    if vnc_mappings is not None and len(vnc_mappings) != 0:
        for mapping in vnc_mappings:
            logger.debug('Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s' % (mapping.vm_id, mapping.host_id, mapping.token, mapping.vnc_source_port, mapping.vnc_destination_port))
            # BUG FIX: context managers guarantee the file handles close.
            with open("/home/www-data/token.list", "r") as f:
                lines = f.readlines()
            token = mapping.token
            logger.debug("token is : " + str(token))
            logger.debug("token type is : " + str(type(token)))
            with open("/home/www-data/token.list", "w") as f:
                for line in lines:
                    if token not in line:
                        logger.debug("lines are : " + str(line))
                        f.write(line)
            current.db(current.db.vnc_access.id == mapping.id).delete()
            current.db.commit()
        logger.debug("Done clearing novnc mappings")
    else:
        # BUG FIX: no expired mappings is not an error; the original raised
        # a copy-pasted 'NAT type is not supported' exception here.
        logger.debug("No timed out VNC mappings to clear")
def send_email_vm_warning(task_type, vm_users, vm_name, vm_shutdown_time):
    """Send shutdown/delete warning mails to every user of a VM.

    task_type selects the template (VM_TASK_WARNING_SHUTDOWN or
    VM_TASK_WARNING_DELETE).  Returns the computed action time, 20 days
    from now.
    """
    vm_action_time = get_datetime() + timedelta(days=20)
    cc_user_list = ["*****@*****.**"]  # CC address redacted in source
    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        # user_info[0] is the display name, user_info[1] the e-mail address.
        if user_info[1] is not None:
            context = dict(entityName=vm_name,
                           userName=user_info[0],
                           vmShutdownDate=vm_shutdown_time,
                           vmActionDate=vm_action_time)
            # BUG FIX: the original log statement was corrupted by a
            # credential-scrubbing pass ('"******"' is not valid Python);
            # reconstructed to log the user name.
            logger.debug("Inside send warning e-mail for vm:" + vm_name +
                         ", userName:" + str(user_info[0]) +
                         ", vmShutdownDate:" + str(vm_shutdown_time) +
                         ", vmDeleteDate:" + str(vm_action_time))
            if task_type == VM_TASK_WARNING_SHUTDOWN:
                send_email(user_info[1], SHUTDOWN_WARNING_SUBJECT,
                           SHUTDOWN_WARNING_BODY, context, cc_user_list)
            elif task_type == VM_TASK_WARNING_DELETE:
                send_email(user_info[1], DELETE_WARNING_SUBJECT,
                           DELETE_WARNING_BODY, context, cc_user_list)
            else:
                logger.debug("Not a valid task type")
    return vm_action_time
def check_response_body_upload(res):
    """Accept an upload response that passes the default body check, or
    one whose status code is 254 (identical version already exists)."""
    if check_response_body_default(res):
        return True
    if re.search('"code": *254', res):
        logger.debug('check_response_body_upload: code is 254 = identical version already exists')
        return True
    logger.debug('check_response_body_upload: status code is NOT OK')
    return False
def reset(self, startup=False):
    """Reinstall the hub from scratch (wrapped apps removed), optionally
    starting it up, then return to the home screen."""
    logger.debug('WorkHubCore.reset: startup=%s' % startup)
    uninstall_wrapped_apps()
    self.prep(reinstall=True)
    if startup:
        self.startup()
    device.send_key(device.KEY_HOME)
    return self
def wait_until_gone(self, timeout):
    """Wait until this UI object disappears; timeout is in seconds
    (converted to milliseconds for the uiautomator runner)."""
    logger.debug('UiObj.wait_until_gone: path=%s, timeout=%s' % (self._path, timeout))
    params = dict(path=self._path, action='WaitUntilGone', timeout=(timeout * 1000))
    result = uiautomator.test('TestUiObject', params)
    logger.debug('UiObj.wait_until_gone: result=%s' % result)
    return result
def get_device_info_by_mac(mac):
    """Look up device info by MAC address via the API; None on failure."""
    logger.debug('get_device_info_by_mac: mac=%s' % mac)
    sout = call_api(dict(api='/api1/devices/by-mac/%s' % mac, method='GET'))
    if sout:
        return json.loads(sout)['device-info']
    return None
def stop_driver(wd):
    """Quit the given webdriver and free its slot in the port table."""
    global _ports
    logger.debug('quit_driver: wd=%s' % wd)
    for port, driver in _ports.iteritems():
        if driver == wd:
            _ports[port] = None
            break
    wd.quit()
def get_app_metadata(packbund):
    """Fetch an app's metadata from the management API; None on failure."""
    logger.debug('get_app_metadata: packbund=%s' % packbund)
    sout = call_api(dict(api='/api1/apps/%s/metadata' % packbund, method='GET'))
    if not sout:
        logger.debug('get_app_metadata: failed to get app metadata')
        return None
    return json.loads(sout)['metadata']
def initialize(jar_folder, jar_name, packbund):
    """Push the uiautomator jar to the device and remember its location.

    Raises Exception when adb push does not report a transfer speed,
    which indicates the push failed.
    """
    global _jar_folder, _jar_name, _packbund
    logger.debug('initialize: jar_folder=%s, jar_name=%s, packbund=%s' % (jar_folder, jar_name, packbund))
    _jar_folder = jar_folder
    _jar_name = jar_name
    _packbund = packbund
    sout, serr = android_device.run_adb('push "%s" /data/local/tmp' % os.path.join(jar_folder, jar_name))
    # adb push reports "<n> KB/s ..." on stderr on success.
    # BUG FIX: raw string -- '\d' is an invalid escape in a plain literal.
    if not serr or not re.match(r'^\d+ KB/s', serr):
        raise Exception('Failed to install uiautomator jar', __name__)
def create_public_ip_mapping_in_nat(vm_id):
    """Create the public->private NAT mapping for a VM and record the
    assignment in the public IP pool table."""
    vm_data = current.db.vm_data[vm_id]
    try:
        create_mapping(vm_data.public_ip, vm_data.private_ip)
        logger.debug("Updating DB")
        query = current.db.public_ip_pool.public_ip == vm_data.public_ip
        current.db(query).update(vm_id=vm_id)
    except:
        log_exception()
def get_result(self, action, param=None):
    """Run a TestUiObject action and return its RESULT payload.

    action: the uiautomator action name.
    param: optional extra parameters; 'path' and 'action' are filled in.
    Returns the matched result string, or None when the run failed or no
    RESULT line was emitted.
    """
    # BUG FIX: the original used a mutable default (param={}) and mutated
    # it, leaking 'path'/'action' state across calls that omit the arg.
    if param is None:
        param = {}
    param['path'] = self._path
    param['action'] = action
    if not uiautomator.test('TestUiObject', param):
        return None
    match = re.search(r'^TestUiObject.RESULT:(.*)', uiautomator.lastout(), flags=re.MULTILINE)
    if not match:
        return None
    logger.debug('UiObj.get_result: result=%s' % match.group(1))
    return match.group(1)
def prep(self, reinstall=False):
    """Prepare the iOS app; when reinstall is set, remove any installed
    copy first.  A launch failure during removal is tolerated (the app
    simply was not installed)."""
    logger.debug('iOSApp.prep: reinstall=%s' % reinstall)
    if not reinstall:
        return
    try:
        self.initiate()
        self._wd.remove_app(self._packbund)
        self.stop()
    except Exception as e:
        # Only swallow the error when it is a known launch failure.
        if not appiummanager.is_launch_failure(self._packbund):
            raise e
def verify_pasted_text(self, verify, block, errmsg):
    """Compare the Test App's pasted-text field against `verify`.

    block=True inverts the expectation: the text must NOT have been
    pasted.  Raises Exception (prefixed with errmsg) on a mismatch.
    """
    pasted = UiObj('/EditText[@description="Pasted text:"]').get_text()
    logger.debug('TAoneApp.paste_text: EditText.getText()=%s' % pasted)
    logger.debug('TAoneApp.paste_text: verify=%s' % verify)
    matched = (pasted == verify)
    if matched and block:
        raise Exception('%s: Text was pasted unexpectedly --- should NOT be pasted' % errmsg)
    if not matched and not block:
        raise Exception('%s: Text was NOT pasted successfully' % errmsg)
def prep(self, policy=None, reinstall=False):
    """Install the app's APK when it is missing or reinstall is requested.

    reinstall may be True or the string 'always'; unknown packages raise.
    """
    logger.debug('AndroidApp.prep: policy=%s, reinstall=%s' % (policy, reinstall))
    if self._packbund not in settings.local.apk_files:
        raise Exception('Unknown app %s to prepare' % self._packbund)
    needs_install = (not android_device.is_app_installed(self._packbund)
                     or reinstall is True or reinstall == 'always')
    if needs_install:
        apk_file = os.path.join(settings.local.apk_folder, settings.local.apk_files[self._packbund])
        if not android_device.install_app(self._packbund, apk_file, reinstall=True):
            raise Exception('Failed to install app "%s" on device' % self._packbund)
    return self
def open_with(self, app, workspace=False):
    """Pick `app` in the Android app chooser to open the current data.

    When no chooser appears, the app is assumed to have started already.
    """
    chooser = self.app_chooser()
    if not chooser:
        logger.debug('AndroidApp.open_with: No app chooser found --- probably the app has already started up')
    elif not chooser.find_app_chooser_text(app, click=True):
        raise Exception('Failed to open the data with %s' % app)
    if self.is_android_error():
        raise Exception('Failed to open the data with %s: Android error' % app)
    return self
def prep(self, policy=None, reinstall=False):
    """Download the Android workhub APK if absent, then install it."""
    logger.debug('WorkHubCore.prep: reinstall=%s' % reinstall)
    apk_file = settings.local.android_workhub_path
    if not os.path.exists(apk_file):
        logger.debug('WorkHubCore.prep: workhub has not been downloaded yet')
        if not connection.download_workhub('android', apk_file):
            raise Exception('Failed to download workhub')
    if not device.install_app(connection.workhub_packbund(), apk_file, reinstall):
        raise Exception('Failed to install workhub')
    return self
def set_app_on_device(device_id, packbund, action):
    """Enable, disable, or wipe an app on a device via the management API.

    Returns True on API success, False on failure or an invalid action.
    """
    logger.debug('set_app_on_device_by_mac: device_id=%s, packbund=%s, action=%s' % (device_id, packbund, action))
    if action not in ('enable', 'disable', 'wipe'):
        return False
    req = dict(api='/api1/devices/%s/apps/%s/%s' % (device_id, packbund, action), method='POST')
    return bool(call_api(req))
def prep(self, policy=None, reinstall=False):
    """Download the Mobile Security APK if absent, then install it."""
    logger.debug('MobileSecurity.prep: reinstall=%s' % reinstall)
    apk_file = settings.local.nms_path
    if not os.path.exists(apk_file):
        logger.debug('MobileSecurity.prep: msn has not been downloaded yet')
        if not connection.download_nms(apk_file):
            raise Exception('Failed to download nms')
    if not device.install_app(self._packbund, apk_file, reinstall):
        raise Exception('Failed to install nms')
    return self
def clear_all_timedout_vnc_mappings():
    """Deletes all timed-out VNC mappings from NAT.

    software NAT: removes the iptables forwarding rules on the NAT box
    and marks each vnc_access row inactive.  mapping NAT: only updates
    the DB rows.  hardware NAT: not implemented.
    """
    nat_type, nat_ip, nat_user = _get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        logger.debug("Clearing all timed out VNC mappings from NAT box %s" % (nat_ip))
        # Get all active VNC mappings from DB
        vnc_mappings = current.db(
            (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
            (current.db.vnc_access.expiry_time < get_datetime())).select()
        # BUG FIX: short-circuit 'and' instead of bitwise '&' on booleans.
        if vnc_mappings is not None and len(vnc_mappings) != 0:
            # Delete the VNC mapping from NAT if the duration of access
            # has past its requested time duration
            command = ''
            for mapping in vnc_mappings:
                logger.debug('Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s' % (mapping.vm_id, mapping.host_id, mapping.vnc_server_ip, mapping.vnc_source_port, mapping.vnc_destination_port))
                host_ip = mapping.host_id.host_ip.private_ip
                # Delete rules from iptables on NAT box
                command += '''
iptables -D PREROUTING -t nat -i %s -p tcp -d %s --dport %s -j DNAT --to %s:%s
iptables -D FORWARD -p tcp -d %s --dport %s -j ACCEPT''' % (NAT_PUBLIC_INTERFACE, mapping.vnc_server_ip, mapping.vnc_source_port, host_ip, mapping.vnc_destination_port, host_ip, mapping.vnc_destination_port)
                # Update DB for each VNC access
                current.db(current.db.vnc_access.id == mapping.id).update(status=VNC_ACCESS_STATUS_INACTIVE)
            command += '''
/etc/init.d/iptables-persistent save
/etc/init.d/iptables-persistent reload
exit
'''
            current.db.commit()
            execute_remote_bulk_cmd(nat_ip, nat_user, command)
            logger.debug("Done clearing vnc mappings")
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # Mapping NAT keeps no state on a box; only the DB is updated.
        logger.debug('Clearing all timed out VNC mappings')
        # Get all active VNC mappings from DB
        vnc_mappings = current.db(
            (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
            (current.db.vnc_access.expiry_time < get_datetime())).select()
        # BUG FIX: 'and' instead of bitwise '&' here as well.
        if vnc_mappings is not None and len(vnc_mappings) != 0:
            for mapping in vnc_mappings:
                # Update DB for each VNC access
                current.db(current.db.vnc_access.id == mapping.id).update(status=VNC_ACCESS_STATUS_INACTIVE)
            current.db.commit()
            logger.debug("Done clearing vnc mappings")
    else:
        raise Exception("NAT type is not supported")
def find_element(self, xpath, text=None):
    """Find an element by xpath; when text is given the element's text
    must match exactly, otherwise None is returned."""
    logger.debug('iOSApp.find_element: xpath=%s, text=%s' % (xpath, text))
    try:
        found = self._wd.find_element_by_xpath(xpath)
        if found:
            logger.debug('id: %s, text: %s' % (found.id, found.text))
        if found is not None and text is not None and found.text != text:
            found = None
    except NoSuchElementException:
        found = None
    return found
def find_element_until(self, xpath, text=None, timeout=App.TIMEOUT_LAUNCH_MS):
    """Poll find_element every 0.5 s until it returns an element or the
    timeout (milliseconds) expires; None on timeout."""
    logger.debug('iOSApp.find_element_until: xpath=%s, text=%s, timeout=%s' % (xpath, text, timeout))
    waited = 0
    while waited < timeout:
        found = self.find_element(xpath, text)
        if found:
            return found
        time.sleep(0.5)
        waited += 500
        logger.debug('iOSApp.find_element_until: elapsed=%s' % waited)
    return None
def disassociate_device_by_serial(serial):
    """Dissociate the device with the given serial number.

    Returns the API status field, or None when the device is unknown or
    the API call fails.
    """
    logger.debug('disassociate_device_by_serial: serial=%s' % serial)
    device_id = get_device_id(serial)
    if not device_id:
        return None
    sout = call_api(dict(api='/api1/devices/%s/dissociate' % device_id, method='POST'))
    if not sout:
        return None
    return json.loads(sout)['status']
def remove_public_ip_mapping_from_nat(vm_id):
    """Drop a VM's public->private NAT mapping and clear its public IP
    in the vm_data record."""
    vm_data = current.db.vm_data[vm_id]
    try:
        remove_mapping(vm_data.public_ip, vm_data.private_ip)
        # Update DB
        logger.debug("Updating DB")
        vm_data.update_record(public_ip=None)
    except:
        log_exception()
def create_public_ip_mapping_in_nat(vm_id):
    """Map a VM's public IP to its private IP on the NAT, then mark the
    pool entry as assigned to this VM."""
    vm_data = current.db.vm_data[vm_id]
    try:
        create_mapping(vm_data.public_ip, vm_data.private_ip)
        logger.debug("Updating DB")
        pool = current.db.public_ip_pool
        current.db(pool.public_ip == vm_data.public_ip).update(vm_id=vm_id)
    except:
        log_exception()
def remove_vnc_mapping_from_nat(vm_id):
    """Remove a VM's VNC port mapping from the NAT and mark every
    vnc_access row for the VM inactive."""
    vm_data = current.db.vm_data[vm_id]
    vnc_host_ip = config.get("GENERAL_CONF", "vnc_ip")
    host_ip = vm_data.host_id.host_ip.private_ip
    vnc_port = vm_data.vnc_port
    try:
        remove_mapping(vnc_host_ip, host_ip, vnc_port, vnc_port)
        logger.debug("Updating DB")
        current.db(current.db.vnc_access.vm_id == vm_id).update(status=VNC_ACCESS_STATUS_INACTIVE)
    except:
        log_exception()
def clear_all_nat_mappings(db):
    """ Clears mappings from NAT """
    # Determine how NAT is deployed: software box, hardware, or mapping.
    nat_type, nat_ip, nat_user = _get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        logger.debug("Clearing all NAT mappings")
        command = ''
        # For all public IP - private IP mappings, Delete aliases
        # NOTE: 'public_ip != None' is a web2py DAL query expression,
        # not a Python comparison -- do not rewrite as 'is not None'.
        for vm_data_info in db(db.vm_data.public_ip != None).select():
            private_ip = vm_data_info.private_ip.private_ip
            public_ip = vm_data_info.public_ip.public_ip
            logger.debug('Removing private to public IP mapping for private IP: %s and public IP:%s' % (private_ip, public_ip))
            # private_ip_octets = mapping['private_ip'].split('.')
            # Alias names encode the last three octets of the public IP.
            public_ip_octets = public_ip.split('.')
            interface_alias = "%s:%s.%s.%s" % (NAT_PUBLIC_INTERFACE, public_ip_octets[1], public_ip_octets[2], public_ip_octets[3])
            # Tear down the alias and rebuild /etc/network/interfaces
            # from the remaining per-alias fragments.
            command += '''
rm /etc/network/interfaces.d/2_%s.cfg
ifconfig %s down
cat /etc/network/interfaces.d/*.cfg > /etc/network/interfaces
''' % (interface_alias, interface_alias)
        # Flushing all rules from iptables
        command += '''
iptables --flush
iptables -t nat --flush
iptables --delete-chain
iptables -t nat --delete-chain
/etc/init.d/iptables-persistent save
/etc/init.d/iptables-persistent reload
exit
'''
        execute_remote_bulk_cmd(nat_ip, nat_user, command)
        # Updating DB
        logger.debug("Flushing all public Ip - private IP mappings and VNC mappings from DB")
        db.vm_data.update(public_ip=None)
        db.vnc_access.update(status=VNC_ACCESS_STATUS_INACTIVE)
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # Mapping NAT keeps no box state; only the DB needs clearing.
        logger.debug("Clearing all mapping information from DB")
        db.vm_data.update(public_ip=None)
        db.vnc_access.update(status=VNC_ACCESS_STATUS_INACTIVE)
    else:
        raise Exception("NAT type is not supported")
def remove_mapping(source_ip, destination_ip, source_port=-1, destination_port=-1):
    """Remove a mapping from NAT.

    For software NAT, a public->private IP mapping removal (both ports
    -1) deletes the NAT alias plus the iptables forwarding rules; a VNC
    revocation (explicit ports) deletes only the port-forwarding rules.
    Hardware NAT is unimplemented; mapping NAT is a no-op.
    """
    nat_type, nat_ip, nat_user = _get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        # Both ports being -1 marks a plain public->private IP removal.
        if source_port == -1 and destination_port == -1:
            logger.debug("Removing mapping for public IP: %s and private IP: %s" % (source_ip, destination_ip))
            command = (_get_interfaces_command('Delete', source_ip) +
                       _get_ip_tables_command('Delete', source_ip, destination_ip))
        else:
            logger.debug("Removing VNC mapping from NAT for public IP %s host IP %s public VNC port %s private VNC port %s" % (source_ip, destination_ip, source_port, destination_port))
            command = _get_ip_tables_command('Delete', source_ip, destination_ip, source_port, destination_port)
        # Create SSH session to execute all commands on NAT box.
        execute_remote_bulk_cmd(nat_ip, nat_user, command)
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # This function is to be implemented
        return
    else:
        raise Exception("NAT type is not supported")
def push_email(to_address, email_subject, email_message, reply_to_address, cc_addresses=None):
    """Send an e-mail through the configured mailer when mail is active.

    cc_addresses: optional list of CC recipients (defaults to none).
    Logs the mailer's error state and the send status.
    """
    # BUG FIX: avoid a shared mutable default argument (cc_addresses=[]).
    if cc_addresses is None:
        cc_addresses = []
    if config.getboolean("MAIL_CONF", "mail_active"):
        logger.debug("Sending mail to %s with subject %s" % (to_address, email_subject))
        rtn = mail.send(to=to_address,
                        subject=email_subject,
                        message=email_message,
                        reply_to=reply_to_address,
                        cc=cc_addresses)
        logger.error("ERROR:: " + str(mail.error))
        logger.info("EMAIL STATUS:: " + str(rtn))
def scheduler_task_update_callback(dbset, new_fields):
    """Scheduler update hook: when a task transitions to TIMEOUT, run
    task_timeout_cleanup for tasks that carry a task_event_id in vars."""
    if 'status' not in new_fields or 'next_run_time' not in new_fields:
        return
    if new_fields['status'] != 'TIMEOUT':
        return
    db_query = dbset.as_dict()['query']
    id_query = db_query.as_dict()['first']
    row = db(id_query).select().first()
    # row.vars is a literal-encoded dict string; substring test first.
    if 'task_event_id' in row.vars:
        params = ast.literal_eval(row.vars)
        event_id = params['task_event_id']
        logger.debug("Task TimedOut with task_event_id: " + str(event_id))
        task_timeout_cleanup(event_id, row)
    else:
        logger.debug("Task TimedOut without cleanup")
def send_email_delete_vm_warning(vm_users, vm_name, vm_shutdown_time):
    """Warn every user of a VM that it will be deleted in 15 days.

    Returns the computed deletion time.
    """
    vm_delete_time = get_datetime() + timedelta(days=15)
    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        # user_info[0] is the display name, user_info[1] the e-mail address.
        if user_info[1] is not None:
            context = dict(vmName=vm_name,
                           userName=user_info[0],
                           vmShutdownDate=vm_shutdown_time,
                           vmDeleteDate=vm_delete_time)
            # BUG FIX: the original log line was corrupted by a scrubbing
            # pass ('"******"' is not valid Python); reconstructed to log
            # the user name.
            logger.debug("Inside send mail delete vm warning function:" + vm_name +
                         ", userName:" + str(user_info[0]) +
                         ", vmShutdownDate:" + str(vm_shutdown_time) +
                         ", vmDeleteDate:" + str(vm_delete_time))
            send_email(user_info[1], DELETE_WARNING_SUBJECT, DELETE_WARNING_BODY, context)
    return vm_delete_time
def task_timeout_cleanup(task_event_id, scheduler_row):
    """Mark a timed-out or failed scheduler task as FAILED in the task
    queue tables, recording a message and the end time.

    Only acts while the task_queue row is still PENDING.
    """
    logger.debug("cleaning up for " + scheduler_row.status + " task: " + str(task_event_id))
    task_event_data = db.task_queue_event[task_event_id]
    task_queue_data = db.task_queue[task_event_data.task_id]
    if task_queue_data.status != TASK_QUEUE_STATUS_PENDING:
        return
    # On return, update the status and end time in the task event table.
    msg = ""
    if scheduler_row.status == 'TIMEOUT':
        msg = "Task Timeout "  # + task_event_data['message']
    elif scheduler_row.status == 'FAILED':
        # Use the traceback of the most recent run as the failure message.
        runs = db(db.scheduler_run.task_id == scheduler_row.id).select()
        runs.sort(lambda row: row.stop_time, reverse=True)
        msg = runs.first().traceback
    task_event_data.update_record(status=TASK_QUEUE_STATUS_FAILED, message=msg, end_time=get_datetime())
    task_queue_data.update_record(status=TASK_QUEUE_STATUS_FAILED)
def remove_mapping(source_ip, destination_ip, source_port=-1, destination_port=-1):
    """Remove a NAT mapping: full public->private IP mapping when both
    ports are -1, otherwise just the VNC port-forwarding rules."""
    nat_type, nat_ip, nat_user = get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        ip_mapping_removal = (source_port == -1 and destination_port == -1)
        if ip_mapping_removal:
            logger.debug("Removing mapping for public IP: %s and private IP: %s" % (source_ip, destination_ip))
            command = (get_interfaces_command('Delete', source_ip) +
                       get_ip_tables_command('Delete', source_ip, destination_ip))
        else:
            logger.debug("Removing VNC mapping from NAT for public IP %s host IP %s public VNC port %s private VNC port %s" % (source_ip, destination_ip, source_port, destination_port))
            command = get_ip_tables_command('Delete', source_ip, destination_ip, source_port, destination_port)
        # All commands run over one SSH session on the NAT box.
        execute_remote_bulk_cmd(nat_ip, nat_user, command)
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # This function is to be implemented
        return
    else:
        raise Exception("NAT type is not supported")
def create_mapping(source_ip, destination_ip, source_port=-1, destination_port=-1, duration=-1):
    """Create a mapping in NAT.

    For software NAT: both ports -1 creates a plain public->private IP
    mapping (alias + iptables forwarding); explicit ports create a VNC
    port-forwarding rule.  Hardware NAT is unimplemented; mapping NAT is
    a no-op.
    """
    nat_type, nat_ip, nat_user = _get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        # BUG FIX: was 'source_port == -1 & destination_port == -1',
        # which only behaves for ints via bitwise-&/chained-comparison
        # precedence; use an explicit boolean 'and'.
        if source_port == -1 and destination_port == -1:
            logger.debug("Adding public ip %s private ip %s mapping on NAT" % (source_ip, destination_ip))
            interfaces_command = _get_interfaces_command('Add', source_ip)
            iptables_command = _get_ip_tables_command('Add', source_ip, destination_ip)
            command = interfaces_command + iptables_command
        else:
            logger.debug("Creating VNC mapping on NAT box for public IP %s host IP %s public VNC port %s private VNC port %s duration %s" % (source_ip, destination_ip, source_port, destination_port, duration))
            logger.debug("Creating SSH session on NAT box %s" % (nat_ip))
            interfaces_command = _get_interfaces_command('Add', source_ip)
            iptables_command = _get_ip_tables_command('Add', source_ip, destination_ip, source_port, destination_port)
            command = interfaces_command + iptables_command
        # Create SSH session to execute all commands on NAT box.
        execute_remote_bulk_cmd(nat_ip, nat_user, command)
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # This function is to be implemented
        return
    else:
        raise Exception("NAT type is not supported")
def create_mapping(source_ip, destination_ip, source_port=-1, destination_port=-1, duration=-1):
    """Create a NAT mapping (public->private IP when both ports are -1,
    otherwise a VNC port forwarding rule)."""
    nat_type, nat_ip, nat_user = get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        # BUG FIX: replaced 'source_port == -1 & destination_port == -1'
        # (bitwise-& / chained-comparison precedence trap) with 'and'.
        if source_port == -1 and destination_port == -1:
            logger.debug("Adding public ip %s private ip %s mapping on NAT" % (source_ip, destination_ip))
            interfaces_command = get_interfaces_command('Add', source_ip)
            iptables_command = get_ip_tables_command('Add', source_ip, destination_ip)
            command = interfaces_command + iptables_command
        else:
            logger.debug("Creating VNC mapping on NAT box for public IP %s host IP %s public VNC port %s private VNC port %s duration %s" % (source_ip, destination_ip, source_port, destination_port, duration))
            logger.debug("Creating SSH session on NAT box %s" % (nat_ip))
            interfaces_command = get_interfaces_command('Add', source_ip)
            iptables_command = get_ip_tables_command('Add', source_ip, destination_ip, source_port, destination_port)
            command = interfaces_command + iptables_command
        # Create SSH session to execute all commands on NAT box.
        execute_remote_bulk_cmd(nat_ip, nat_user, command)
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # This function is to be implemented
        return
    else:
        raise Exception("NAT type is not supported")
def clear_all_timedout_vnc_mappings():
    """
    Remove expired VNC mappings.

    For software NAT: deletes the corresponding iptables DNAT/FORWARD rules on
    the NAT box over SSH and marks each vnc_access row inactive.
    For the 'mapping' NAT type: only marks the DB rows inactive.
    Raises Exception for unimplemented (hardware) or unknown NAT types.
    """
    nat_type, nat_ip, nat_user = get_nat_details()
    if nat_type == NAT_TYPE_SOFTWARE:
        logger.debug("Clearing all timed out VNC mappings from NAT box %s" % (nat_ip))
        # Get all active VNC mappings from DB
        vnc_mappings = current.db(
            (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
            (current.db.vnc_access.expiry_time < get_datetime())).select()
        # Short-circuit 'and' replaces the original eager '&', which would
        # evaluate len(None) and raise TypeError if select() returned None.
        if vnc_mappings is not None and len(vnc_mappings) != 0:
            # Delete the VNC mapping from NAT if the duration of access has
            # past its requested time duration
            command = ''
            for mapping in vnc_mappings:
                logger.debug(
                    'Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s'
                    % (mapping.vm_id, mapping.host_id, mapping.vnc_server_ip,
                       mapping.vnc_source_port, mapping.vnc_destination_port))
                host_ip = mapping.host_id.host_ip.private_ip
                # Delete rules from iptables on NAT box
                command += '''
iptables -D PREROUTING -t nat -i %s -p tcp -d %s --dport %s -j DNAT --to %s:%s
iptables -D FORWARD -p tcp -d %s --dport %s -j ACCEPT''' % (
                    NAT_PUBLIC_INTERFACE, mapping.vnc_server_ip, mapping.vnc_source_port,
                    host_ip, mapping.vnc_destination_port,
                    host_ip, mapping.vnc_destination_port)
                # Update DB for each VNC access
                current.db(current.db.vnc_access.id == mapping.id).update(
                    status=VNC_ACCESS_STATUS_INACTIVE)
            command += '''
/etc/init.d/iptables-persistent save
/etc/init.d/iptables-persistent reload
exit
'''
            current.db.commit()
            execute_remote_bulk_cmd(nat_ip, nat_user, command)
            logger.debug("Done clearing vnc mappings")
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # This function is to be implemented
        logger.debug('Clearing all timed out VNC mappings')
        # Get all active VNC mappings from DB
        vnc_mappings = current.db(
            (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
            (current.db.vnc_access.expiry_time < get_datetime())).select()
        # Same short-circuit fix as above.
        if vnc_mappings is not None and len(vnc_mappings) != 0:
            for mapping in vnc_mappings:
                # Update DB for each VNC access
                current.db(current.db.vnc_access.id == mapping.id).update(
                    status=VNC_ACCESS_STATUS_INACTIVE)
            current.db.commit()
            logger.debug("Done clearing vnc mappings")
    else:
        raise Exception("NAT type is not supported")
def vm_data_delete_callback(dbset):
    """Detach child references before the rows in *dbset* are deleted.

    For every vm_data record in the set, any other vm_data row that points at
    it through parent_id is updated to parent_id=None.
    """
    for record in dbset.select():
        logger.debug('Deleting references for ' + record.vm_identity)
        db(db.vm_data.parent_id == record.id).update(parent_id=None)
def get_vm_operations(vm_id):
    """
    Build the list of operation icons/links available for the given VM.

    Returns a list of web2py helpers: a bare IMG when links are disabled
    (a delete request is queued), otherwise an A() anchor per operation —
    onclick-driven for the JS operations (VNC access, delete confirmation),
    href-driven for controller actions. Which operations appear depends on
    the VM status and the current user's role.

    Raises Exception when the VM status is UNKNOWN or IN_QUEUE.
    """
    # operation -> (controller or None for JS-onclick, icon file, tooltip)
    vm_operations = {
        'start_vm': ('user', 'on-off.png', 'Turn on this virtual machine'),
        'suspend_vm': ('user', 'pause2.png', 'Suspend this Virtual Machine'),
        'resume_vm': ('user', 'play2.png', 'Unpause this virtual machine'),
        'stop_vm': ('user', 'shutdown2.png', 'Gracefully shut down this virtual machine'),
        'destroy_vm': ('user', 'on-off.png', 'Forcefully power off this virtual machine'),
        'clone_vm': ('user', 'clonevm.png', 'Request VM Clone'),
        'attach_extra_disk': ('user', 'disk.jpg', 'Attach Extra Disk'),
        'snapshot': ('user', 'snapshot.png', 'Take VM snapshot'),
        'edit_vm_config': ('user', 'editme.png', 'Edit VM Config'),
        'show_vm_performance': ('user', 'performance.jpg', 'Check VM Performance'),
        'vm_history': ('user', 'history.png', 'Show VM History'),
        'confirm_vm_deletion()': (None, 'delete.png', 'Delete this virtual machine'),
        'vnc_url()': ('user', 'vnc.jpg', 'Grant VNC Access'),
        'migrate_vm': ('admin', 'migrate.png', 'Migrate this virtual machine'),
        'user_details': ('admin', 'user_add.png', 'Add User to VM'),
        'save_as_template': ('user', 'save.png', 'Save as Template'),
        'mail_user': ('admin', 'email_icon.png', 'Send Mail to users of the VM'),
        'affinity_host': ('admin', 'affinity.png', 'Set Affinity')
    }
    valid_operations_list = []
    vm_status = db.vm_data[vm_id].status
    if vm_status not in (VM_STATUS_UNKNOWN, VM_STATUS_IN_QUEUE):
        valid_operations = ['snapshot', 'show_vm_performance', 'affinity_host']
        if vm_status == VM_STATUS_RUNNING:
            valid_operations.extend(['suspend_vm', 'stop_vm', 'destroy_vm'])
        elif vm_status == VM_STATUS_SUSPENDED:
            valid_operations.extend(['resume_vm'])
        elif vm_status == VM_STATUS_SHUTDOWN:
            # Start VM option is not valid if edit VM or attach disk option is
            # in queue.  Short-circuit 'or' replaces the original bitwise '|',
            # which always evaluated both queue lookups.
            if not (is_request_in_queue(vm_id, VM_TASK_EDIT_CONFIG) or
                    is_request_in_queue(vm_id, VM_TASK_ATTACH_DISK)):
                valid_operations.extend(['start_vm'])
            valid_operations.extend([
                'clone_vm', 'edit_vm_config', 'attach_extra_disk', 'save_as_template'
            ])
        if not is_general_user():
            valid_operations.extend(['confirm_vm_deletion()'])
        if is_moderator():
            valid_operations.extend(['migrate_vm'])
            valid_operations.extend(['user_details'])
            valid_operations.extend(['mail_user'])
        valid_operations.extend(['vnc_url()', 'vm_history'])
        # Disable all links if Delete VM option is in queue
        link_disabled = is_request_in_queue(vm_id, VM_TASK_DELETE)
        for valid_operation in valid_operations:
            op_data = vm_operations[valid_operation]
            op_image = IMG(_src=URL('static', 'images/' + op_data[1]),
                           _style='height:20px;weight:20px')
            if link_disabled:
                valid_operations_list.append(op_image)
            else:
                if op_data[0] is not None:
                    if op_data[2] == 'Grant VNC Access':
                        logger.debug("checking vnc" + str(valid_operation))
                        valid_operations_list.append(
                            A(op_image, _title=op_data[2], _alt=op_data[2],
                              _onclick=valid_operation))
                    else:
                        valid_operations_list.append(
                            A(op_image, _title=op_data[2], _alt=op_data[2],
                              _href=URL(r=request, c=op_data[0],
                                        f=valid_operation, args=[vm_id])))
                else:
                    logger.debug("checking vnc" + str(valid_operation))
                    valid_operations_list.append(
                        A(op_image, _title=op_data[2], _alt=op_data[2],
                          _onclick=valid_operation))
    else:
        logger.error("INVALID VM STATUS!!!")
        # The original bare 'raise' had no active exception, so it produced
        # RuntimeError('No active exception to re-raise'); raise explicitly.
        raise Exception("Invalid VM status for vm id: %s" % vm_id)
    return valid_operations_list