def post(self, tracking_id):
    """Re-queue an import tracking entry with a new state.

    Clones the existing tracking record (minus id/timestamps), applies the
    requested state, stores it as a new tracking entry and submits an
    "import" job to the message queue.

    Args:
        tracking_id: encrypted id of the tracking record to requeue.

    Responds 200 on success, 404 on any error.
    """
    logger.debug("requeue import tracking")
    self.check_token()
    values = self.post_values()
    self.require_arguments(values, ['state'])
    state = values['state']
    try:
        tracking_id = utils.decrypt_value(tracking_id)
        tracking = db.get_import_tracking(tracking_id)
        logger.debug(f"tracking to requeue: {tracking}")
        tracking['state'] = state
        # drop the fields the database assigns to the new record
        for k in ['id', 'create_time', 'update_time']:
            del tracking[k]
        tracking[
            'log'] = f"requeue import tracker {tracking_id} and changed state to {state}"
        tracking_id = db.add_import_tracking(tracking)
        tracking_id = utils.encrypt_value(tracking_id)
        submit_mq_job(tracking_id, "import")
        self.send_response_200()
    except Exception as e:
        logger.error(f"Request import tracking error {e}")
        # BUGFIX: original did `import stacktrace`, which itself raises
        # ImportError (the stdlib module is `traceback`) and masked the error.
        import traceback
        traceback.print_tb(e.__traceback__)
        self.send_response_404()
def check_token(self, tokens: list = None):
    """Validate the request's bearer token.

    The token is accepted when it matches the module-level `token` or is
    contained in the optional `tokens` list (e.g. proxy keys). On failure a
    401 response is sent.

    Args:
        tokens: optional list of additional valid tokens (default None).

    Returns:
        The module-level token.
    """
    # BUGFIX: annotation was the list *literal* `[]`, which evaluates to an
    # empty list object, not a type; `list = None` is the intended contract.
    global token
    header_token = None
    auth_header = self.request.headers.get('Authorization', None)
    if auth_header:
        # strip the leading "Bearer " prefix
        header_token = auth_header[7:]

    valid_token = False
    if tokens is not None and header_token in tokens:
        valid_token = True
    if token is not None and header_token == token:
        valid_token = True

    if not valid_token:
        logger.debug(
            f"'{header_token}' token is not valid ==> main: '{token}' OR proxy: {tokens}"
        )
        self.send_response_401()

    return token
def get(self, user: str = None, instance_id: str = None):
    """List export trackings, optionally filtered by state/instance/user.

    Args:
        user: email to filter on, or 'all'/None for every user.
        instance_id: restrict to one Galaxy instance when given.
    """
    logger.debug("get Export list")
    logger.debug(proxy_keys)
    self.check_token(proxy_keys)
    # renamed from `filter` to avoid shadowing the builtin
    filters = self.arguments()
    # potential states: 'new', 'upload', 'waiting', 'queued', 'running',
    # 'ok', 'error', 'paused', 'deleted', 'deleted_new' + 'pre-queueing'
    # Ones we care about when polling: 'new', 'waiting', 'queued',
    # 'running' + 'pre-queueing'
    self.valid_arguments(filters, [
        'state',
    ])
    valid_states = [
        'new', 'upload', 'waiting', 'queued', 'running', 'ok', 'error',
        'paused', 'deleted', 'deleted_new', 'pre-queueing', 'fetch-running',
        'fetch-ok', 'fetch-error', 'nels-transfer-queue',
        'nels-transfer-running', 'nels-transfer-ok', 'nels-transfer-error'
    ]
    if 'state' in filters and filters['state'] not in valid_states:
        return self.send_response_400(
            data="Invalid value for state {}".format(filters['state']))

    if instance_id is not None:
        filters['instance'] = instances[instance_id]['name']
    if user is not None and user != 'all':
        filters['user_email'] = user

    exports = utils.encrypt_ids(db.get_export_trackings(**filters))
    self.send_response(data=exports)
def _wait_for_volume_deletion(self,
                              id: str,
                              sleep_time: float = 0.05,
                              timeout: float = 20.0):
    """Block until the volume has been deleted.

    Args:
        id: volume to watch
        sleep_time: time (s) to sleep between checks
        timeout: max time (s) to wait before raising

    Returns:
        None

    Raises:
        RuntimeError: if the volume is not deleted within `timeout`.
    """
    logger.debug("Waiting for volume {} being deleted".format(id))

    while True:
        # idiom fix: `not ...` instead of `== False`
        if not self._volume_exists(id):
            logger.debug(
                "Volume {} has been successfully deleted".format(id))
            return

        time.sleep(sleep_time)
        timeout -= sleep_time
        if timeout < 0.0:
            raise RuntimeError("Volume {} has not been deleted".format(id))
def get_history_from_nels(tracker):
    """Fetch an exported history archive from NeLS via scp and queue an import.

    On success the tracker is set to 'nels-transfer-ok' with the local tmpfile
    path and an "import" job is submitted; on failure the tracker is set to
    'nels-transfer-error'.

    Args:
        tracker: import-tracking dict; reads 'id', 'nels_id' and 'source'.
    """
    tracker_id = tracker['id']
    # BUGFIX: message said "push export start", but this pulls an import.
    logger.info(f'{tracker_id}: pull import start')
    try:
        master_api.update_import(tracker_id, {'state': 'nels-transfer-running'})
        tmpfile = "{}/{}.tgz".format(tempfile.mkdtemp(dir=tmp_dir),
                                     tracker['id'])
        # debug print replaced with proper logging
        logger.debug(f"TMPFILE {tmpfile}")
        ssh_info = get_ssh_credential(tracker['nels_id'])
        logger.debug(f"{tracker_id} ssh info {ssh_info}")
        cmd = f'scp -o StrictHostKeyChecking=no -o BatchMode=yes -i {ssh_info["key_file"]} "{ssh_info["username"]}@{ssh_info["hostname"]}:{tracker["source"]}" {tmpfile}'
        run_cmd(cmd, 'pull data')
        master_api.update_import(tracker_id, {
            'state': 'nels-transfer-ok',
            'tmpfile': tmpfile
        })
        submit_mq_job(tracker_id, "import")
    except Exception as e:
        import traceback
        traceback.print_tb(e.__traceback__)
        master_api.update_import(tracker_id, {'state': 'nels-transfer-error'})
        # BUGFIX: message said "to NeLS"; the data flows *from* NeLS here.
        logger.debug(
            f" tracker-id:{tracker['id']} transfer from NeLS error: {e}")
def server_stop(self, id: str, timeout: int = 300):
    """Stop a server and wait until it reports 'shutoff'.

    Args:
        id: the name of the server
        timeout: max time (s) to wait for the server to shut down

    Returns:
        None

    Raises:
        TimeoutError: if the server is not in shutdown status within
            the timeout time.
    """
    self.check_connection()
    logger.debug("Stopping server id{} ".format(id))
    server = self._connection.compute.get_server(id)
    self._connection.compute.stop_server(server)
    while True:
        server = self._connection.compute.get_server(id)
        if server.status.lower() == 'shutoff':
            # BUGFIX: this log line sat *after* the loop in the original,
            # where it was unreachable; log before returning instead.
            logger.info("Server stopped id:{} ".format(id))
            return
        timeout -= 1
        # BUGFIX: `not timeout` only fires on exactly 0; a non-positive
        # starting value would loop forever. `<= 0` is robust.
        if timeout <= 0:
            raise TimeoutError('timeout before the VM was shutdown')
        logger.debug("sleeping in server stop TO:{} status:{}".format(
            timeout, server.status))
        time.sleep(1)
def connect(self, auth_url: str, project_name: str, username: str,
            password: str, region_name: str, user_domain_name: str,
            project_domain_name: str, **kwargs):
    """Open a connection to an OpenStack cloud.

    Args:
        auth_url: authentication url
        project_name: name of project to connect to
        username: name of the user
        password: password for the user
        region_name: region of the cloud
        user_domain_name: domain of the user
        project_domain_name: domain of the project
        **kwargs: catches extra cloud information from the config file

    Returns:
        None

    Raises:
        None
    """
    # collect credentials first, then connect in one call
    credentials = dict(auth_url=auth_url,
                       project_name=project_name,
                       username=username,
                       password=password,
                       region_name=region_name,
                       user_domain_name=user_domain_name,
                       project_domain_name=project_domain_name)
    self._connection = openstack.connect(**credentials)
    logger.debug("Connected to openstack server")
def get(self, all=False):
    """List history imports, optionally across all users.

    Args:
        all: when the string 'all', list imports for every user; otherwise
            only the default scope. (Name kept for interface compatibility
            even though it shadows the builtin.)
    """
    logger.debug("get history imports list")
    self.check_token()
    # renamed from `filter` to avoid shadowing the builtin
    filters = self.arguments()
    self.valid_arguments(filters, [
        'state',
    ])
    # default to '' which matches any state downstream
    state = filters.get('state', '')
    valid_states = [
        'new', 'upload', 'waiting', '', 'queued', 'running', 'ok', 'error',
        'paused', 'deleted', 'deleted_new', 'pre-queueing', 'all'
    ]
    if state not in valid_states:
        return self.send_response_400(
            data="Invalid value for state {}".format(state))

    if all == 'all':
        imports = db.get_all_imports(state=state)
    else:
        imports = db.get_imports(state=state)

    imports = utils.list_encrypt_ids(imports)
    return self.send_response(data=imports)
def server_log_search(self, id: str, match: str):
    """Search a server's console log for lines matching a pattern.

    Args:
        id: id/name of the server
        match: regex/str of log entry to look for

    Returns:
        matches found in log; empty list when nothing matches.

    Raises:
        None
    """
    log = self.server_log(id)
    logger.debug("Spooling server log for id:{}".format(id))
    # collect every line the pattern hits
    return [entry for entry in log.split("\n") if re.search(match, entry)]
def get(self, tracking_id):
    """Return the details of one export tracking record (ids re-encrypted)."""
    logger.debug("get tracking details")
    self.check_token()
    # ids travel encrypted over the wire; decrypt for the db lookup
    record = db.get_export_tracking(utils.decrypt_value(tracking_id))
    self.send_response(data=utils.encrypt_ids(record))
def post(self, state_id):
    """Finish an import initiated from the NeLS file browser.

    Looks up the stashed state, registers the import for that user, queues
    an "import" job and redirects back to Galaxy. Responds 404 for an
    unknown state id and 400 on registration errors.
    """
    logger.debug(f"POST VALUES: {self.request.body}")
    # NOTE(review): int(None) raises if nelsId is absent — presumably the
    # browser always sends it; confirm against the caller.
    nels_id = int(self.get_body_argument("nelsId", default=None))
    location = self.get_body_argument("selectedFiles", default=None)
    state = states.get(state_id)
    if state is None:
        # BUGFIX: original fell through after responding 404, then crashed
        # on state['user'] and sent a second (400) response.
        return self.send_response_404()
    logger.debug(f"State info for import: {state}")
    try:
        user = state['user']
        tracking_id = self._register_import(user, nels_id, location)
        tracking_id = utils.encrypt_value(tracking_id)
        submit_mq_job(tracking_id, "import")
        self.redirect(galaxy_url)
    except Exception as e:
        logger.error(f"Error during import registation: {e}")
        logger.debug(f"State info for import: {state}")
        logger.debug(f"nels_id: {nels_id}")
        logger.debug(f"location: {location}")
        self.send_response_400()
async def get(self):
    """Report whether the RabbitMQ connection is alive (200) or not (503)."""
    logger.debug("get rabbitmq connection status")
    self.check_token()
    try:
        if mq.connection.is_open:
            mq.connection.process_data_events()
            alive = True
            message = ""
        else:
            alive = False
            logger.error("Connection is NOT open")
            message = "Connection is NOT open"
    except pika.exceptions.ConnectionClosed as e:
        logger.error(e)
        alive = False
        message = "Connection closed: " + str(e)
    except pika.exceptions.StreamLostError as e:
        logger.error(e)
        alive = False
        message = "Connection lost: " + str(e)
    except Exception as e:
        # BUGFIX: original bare `except:` read sys.exc_info()[0].args[0] —
        # exc_info()[0] is the exception *class*, which has no `.args`, so
        # the error handler itself raised.
        logger.error(e)
        alive = False
        message = "Unspecific error: " + str(e)

    if alive:
        return self.send_response(data={'status': alive})
    return self.send_response_503(data={
        'status': alive,
        'message': message
    })
def get(self, state_id):
    """Return the stored state blob for `state_id`, or 404 when unknown."""
    logger.debug("get state")
    self.check_token()
    data = states.get(state_id)
    if data is not None:
        return self.send_response(data=data)
    return self.send_response_404()
def patch(self, tracking_id):
    """Update selected fields of an export tracking record; responds 204."""
    logger.debug("patch tracking details")
    self.check_token()
    changes = self.post_values()
    self.valid_arguments(changes, ['state', 'export_id', 'tmpfile', 'show'])
    # need to decrypt the id otherwise things blow up!
    decrypted_id = utils.decrypt_value(tracking_id)
    db.update_export_tracking(decrypted_id, changes)
    return self.send_response_204()
def get(self):
    """Confirm proxy connectivity: instance name, version and a flag."""
    logger.debug("proxy endpoint test")
    self.check_token(proxy_keys)
    payload = {
        'instance': instances[instance_id]['name'],
        'version': version,
        'proxy-connection': True,
    }
    return self.send_response(data=payload)
def get(self):
    """Report instance id plus free disk space (percent and GB) of the
    galaxy file path."""
    logger.debug("get info")
    self.check_token()
    stats = os.statvfs(galaxy_file_path)
    perc_free = stats.f_bavail / stats.f_blocks * 100.0
    free_size = stats.f_bavail * stats.f_bsize / 1e9
    info = {
        "id": instance_id,
        "perc_free": perc_free,
        'free_gb': free_size,
    }
    return self.send_response(data=info)
def firewall_add_incoming_rule(self,
                               name: str,
                               port: int,
                               protocol: str,
                               remote_group: str = None,
                               remote_ip_range: str = None):
    """Add an ingress firewall rule to a security group (idempotent).

    Args:
        name: name of the security group
        port: port to open
        protocol: protocol, e.g. 'tcp' or 'udp'
        remote_group: security group for intranet communication between
            nodes in that group
        remote_ip_range: ip range filtering

    Returns:
        None

    Raises:
        RuntimeError: if `name` or `remote_group` is unknown.
    """
    groups = self.security_groups()
    if name not in groups:
        raise RuntimeError("Unknown security group {} to update".format(name))

    remote_group_id = None
    if remote_group is not None:
        if remote_group not in groups:
            # BUGFIX: original formatted `name` into this message instead
            # of the remote group that was actually unknown.
            raise RuntimeError(
                "Unknown remote security group '{}'".format(remote_group))
        remote_group_id = groups[remote_group]['id']

    # skip when an identical rule already exists
    if 'rules' in groups[name]:
        for rule in groups[name]['rules']:
            if (rule['ports'] == (port, port)
                    and rule['protocol'] == protocol
                    and rule['direction'] == 'ingress'
                    and rule['remote_group_id'] == remote_group_id
                    and rule['remote_ip_range'] == remote_ip_range):
                logger.debug('firewall rule already exists, skipping it')
                return

    self.security_group_add_rule(id=groups[name]['id'],
                                 direction='ingress',
                                 port=port,
                                 protocol=protocol,
                                 remote_group_id=remote_group_id,
                                 remote_ip_range=remote_ip_range)
def get(self, user_email):
    """Return the (id-encrypted) histories of a user, or 404 if unknown."""
    logger.debug("get user histories")
    self.check_token()
    users = db.get_user(email=user_email)
    if users is None or users == []:
        return self.send_response_404()
    # Should only be one user with a given email!
    owner = users[0]
    histories = utils.encrypt_ids(db.get_user_histories(owner['id']))
    return self.send_response(data=histories)
def patch(self):
    """Accept the terms of service for the current user.

    Responds 204 when the TOS was accepted, 404 otherwise.
    """
    logger.debug("patch TOS")
    user_tos = self.get_tos()
    data = tornado.json_decode(self.request.body)
    # only the explicit 'accepted' status updates the record
    if data.get('status') != 'accepted':
        return self.send_response_404()
    logger.info("Updating TOS for {}".format(user_tos['user_id']))
    user_tos['status'] = 'accepted'
    user_tos['tos_date'] = datetime.datetime.now()
    db.update_tos(user_tos)
    return self.send_response_204()
def _register_import(self, user_id: int, nels_id: int, source: str):
    """Create an import-tracking record in the 'pre-fetch' state.

    Args:
        user_id: internal id of the importing user
        nels_id: NeLS identity to pull from
        source: path of the archive on the NeLS side

    Returns:
        The new (unencrypted) tracking id.
    """
    # BUGFIX: original logged "registering export" in the import path.
    logger.debug("registering import")

    tracking = {
        'user_id': user_id,
        'state': 'pre-fetch',
        'nels_id': nels_id,
        'source': source
    }

    # TODO: guard against duplicate imports once a
    # db.history_import_exists-style helper is available.
    tracking_id = db.add_import_tracking(tracking)
    return tracking_id
def servers(self):
    """Return a list of dicts describing all servers (id, name, ip, status)."""
    listing = [{
        'id': srv.id,
        'name': srv.name.lower(),
        'ip': self.server_ip(srv.id),
        'status': srv.status.lower()
    } for srv in self._connection.compute.servers()]
    logger.debug("Servers: \n{}".format(pp.pformat(listing)))
    return listing
def get(self, ):
    """List jobs, optionally restricted to a time window and/or user."""
    logger.debug("get jobs list")
    self.check_token()
    # renamed from `filter` to avoid shadowing the builtin
    filters = self.arguments()
    logger.debug(f'arguments {filters}')
    self.valid_arguments(filters, ['time_delta', 'user_id'])
    time_delta = filters.get('time_delta', "60m")  # default 1 hour
    time_delta = utils.timedelta_to_sec(time_delta)
    user_id = filters.get('user_id', None)
    if user_id is not None:
        user_id = utils.decrypt_value(user_id)
    jobs = db.get_jobs(time_delta=time_delta, user_id=user_id)
    return self.send_response(data=utils.list_encrypt_ids(jobs))
def patch(self, tracking_id):
    """Let the owning user update the 'show' flag on an export tracking.

    Responds 404 when no user is logged in, 401 when the tracking belongs
    to someone else, 204 on success.
    """
    logger.debug("patch tracking details")
    user = self.get_user()
    if user is None:
        # BUGFIX: original sent the 404 and fell through, crashing on
        # user['email'] below.
        return self.send_response_404()
    data = self.arguments()
    self.require_arguments(data, ['show'])
    # need to decrypt the id otherwise things blow up!
    tracking_id = utils.decrypt_value(tracking_id)
    tracking = db.get_export_tracking(tracking_id)
    logger.debug(tracking)
    if user['email'] != tracking['user_email']:
        # BUGFIX: must return here — otherwise the update below still ran
        # for non-owners after the 401 was sent.
        return self.send_response_401()
    db.update_export_tracking(tracking_id, data)
    return self.send_response_204()
def _register_export(self, instance: str, user: str, history_id: str,
                     nels_id: int, destination: str):
    """Create an export-tracking record in the 'pre-queueing' state.

    Args:
        instance: name of the Galaxy instance
        user: email of the exporting user
        history_id: history being exported
        nels_id: NeLS identity to push to
        destination: target path on the NeLS side

    Returns:
        The new (unencrypted) tracking id.
    """
    logger.debug("registering export")
    tracking = dict(instance=instance,
                    user_email=user,
                    history_id=history_id,
                    state='pre-queueing',
                    nels_id=nels_id,
                    destination=destination)
    # Need this function next
    # if not db.history_export_exists(tracking):
    return db.add_export_tracking(tracking)
def get(self):
    """Return the user's TOS status; during grace, include days remaining.

    A grace period that has run out flips the status to 'expired' and
    persists that change before responding.
    """
    logger.debug("get TOS")
    user_tos = self.get_tos()
    logger.debug("getting tos for {}".format(user_tos['user_id']))
    res = {}
    if user_tos['status'] == 'grace':
        remaining = user_tos['tos_date'] - datetime.datetime.now()
        if remaining.days >= 0:
            # +1 so a partial day still counts as one day left
            res['grace_period'] = "{} days".format(remaining.days + 1)
        else:
            user_tos['status'] = 'expired'
            db.update_tos(user_tos)
    res['status'] = user_tos['status']
    return self.send_response(data=res)
def server_remove_floating_ips(self, id: str) -> int:
    """Remove all floating IPs from a server; this is a cpouta fix.

    Args:
        id: name/id of the server

    Returns:
        Number of floating IPs removed.
    """
    # BUGFIX: annotation said `-> None` but the function returns a count.
    server = self._connection.compute.get_server(id)
    logger.debug(
        'Checking if Floating IP is assigned to testing_instance...')
    ips_removed = 0
    for values in server.addresses.values():
        for address in values:
            if address['OS-EXT-IPS:type'] == 'floating':
                logger.debug("Removing floating ip: {}".format(
                    address['addr']))
                self._connection.compute.remove_floating_ip_from_server(
                    server=server, address=address['addr'])
                ips_removed += 1
    return ips_removed
def get(self):
    """Start a NeLS import: stash the user in a state and redirect to the
    NeLS file browser with a callback URL back into this service."""
    logger.debug("request history import")
    user = self.get_user()
    if user is None or user == []:
        return self.send_response_401()
    uuid = states.set({'user': user['id']})
    redirect_url = (
        f"{nels_url}/welcome.xhtml"
        f"?nels_file_browser&appCallbackUrl={master_url}/import/{uuid}/")
    if DEV:
        print(redirect_url)
    return self.redirect(redirect_url)
def run_cmd(cmd: str, name: str = None, verbose: bool = False):
    """Run a shell command and return its exit status.

    Args:
        cmd: command line to execute
        name: label for the command (currently unused; kept for callers)
        verbose: currently unused; kept for interface compatibility

    Returns:
        The process exit status from run_utils.launch_cmd.
    """
    logger.debug(f"run-cmd: {cmd}")
    exec_info = run_utils.launch_cmd(cmd)
    # mirror the full process result into the debug log
    for label, value in (("exit code", exec_info.p_status),
                         ("std out", exec_info.stdout),
                         ("std error", exec_info.stderr)):
        logger.debug("%s: %s" % (label, value))
    return exec_info.p_status
def get(self, user_email: str = None):
    """Return the user's API key, creating one when none exists yet.

    Responds 404 when the user is unknown.
    """
    logger.debug("get user api-key")
    self.check_token()
    user = db.get_user(email=user_email)
    if user is None or user == []:
        return self.send_response_404()
    # stray debug print replaced with proper logging
    logger.debug(f"user: {user}")
    user = user[0]
    api_key = db.get_api_key(user['id'])
    if api_key is None or api_key == []:
        # first request for this user: mint and store a key
        new_key = utils.create_uuid(32)
        db.add_api_key(user['id'], new_key)
        return self.send_response(data={'api_key': new_key})
    return self.send_response(data={'api_key': api_key['key']})
def server_delete(self, id: str):
    """Delete a server instance.

    Args:
        id: name/id of a server

    Returns:
        None

    Raises:
        RuntimeError: when no such server exists.
    """
    existing = self.server(id)
    if existing is not None:
        self._connection.delete_server(id)
        logger.debug("Deleted server id:{}".format(id))
        return
    logger.debug("Unknown server to delete id:{}".format(id))
    raise RuntimeError("Unknown server {}".format(id))