def check_docker_status_new(app_logger, uuidcode, servername):
    """
    Ask the DockerMaster whether the JupyterLab container is running.

    Sends a GET request with these headers:
        Intern-Authorization, uuidcode, email, servername

    :param app_logger: logger used for debug output
    :param uuidcode: correlation id for log tracing
    :param servername: combined "<email>:<servername>" identifier
    :return: True if DockerMaster answered 200 with body "true", else False
    """
    docker_master_token = utils_file_loads.get_docker_master_token()
    s_email, s_servername = servername.split(':')
    headers = {
        "Intern-Authorization": docker_master_token,
        "uuidcode": uuidcode,
        "email": s_email,
        "servername": s_servername
    }
    urls = utils_file_loads.get_urls()
    url = urls.get('dockermaster', {}).get('url_jlab', '<no_jlab_url_defined>')
    # NOTE(review): verify=False disables TLS certificate checks — presumably
    # intentional for the internal docker network; confirm.
    with closing(requests.get(url, headers=headers, verify=False,
                              timeout=30)) as r:
        # Normalize the response body once (the original duplicated this
        # replace-chain for the log line and for the comparison).
        answer = r.text.strip().replace("\\", "").replace("'", "").replace(
            '"', '').lower()
        app_logger.debug("uuidcode={} - DockerMaster Response: {} !{}!".format(
            uuidcode, r.status_code, answer))
        if r.status_code == 200:
            return answer == "true"
        else:
            return False
def delete_docker_new(app_logger, uuidcode, servername, app_urls):
    """
    Delete the JupyterLab docker container via the DockerMaster and close
    the corresponding ssh tunnel.

    :param app_logger: logger for trace/debug/error/exception output
    :param uuidcode: correlation id for log tracing
    :param servername: combined "<email>:<servername>" identifier
    :param app_urls: url configuration dict; 'tunnel'->'url_tunnel' is used
    :return: None
    """
    app_logger.trace(
        "uuidcode={} - Try to delete docker container named: {}".format(
            uuidcode, servername))
    docker_master_token = utils_file_loads.get_docker_master_token()
    s_email, s_servername = servername.split(':')
    headers = {
        "Intern-Authorization": docker_master_token,
        "uuidcode": uuidcode,
        "email": s_email,
        "servername": s_servername
    }
    urls = utils_file_loads.get_urls()
    url = urls.get('dockermaster', {}).get('url_jlab', '<no_jlab_url_defined>')
    with closing(
            requests.delete(url, headers=headers, verify=False,
                            timeout=30)) as r:
        app_logger.debug("uuidcode={} - DockerMaster Response: {} {}".format(
            uuidcode, r.status_code, r.text))
        if r.status_code != 202:
            # Log only; we still try to tear down the tunnel below.
            app_logger.error(
                "uuidcode={} - Could not Delete JupyterLab via DockerMaster".
                format(uuidcode))
    # Kill the tunnel (best effort: failure is logged, never raised)
    tunnel_info = {"servername": servername}
    try:
        app_logger.debug("uuidcode={} - Close ssh tunnel".format(uuidcode))
        tunnel_communication.close(
            app_logger, uuidcode,
            app_urls.get('tunnel', {}).get('url_tunnel'), tunnel_info)
    except Exception:
        # Narrowed from a bare `except:` which would also have swallowed
        # SystemExit/KeyboardInterrupt.
        app_logger.exception(
            "uuidcode={} - Could not stop tunnel. tunnel_info: {} {}".format(
                uuidcode, tunnel_info,
                app_urls.get('tunnel', {}).get('url_tunnel')))
def create_unicore8_job_dashboard(app_logger, uuidcode, request_json, project,
                                  unicore_input, escapedusername):
    """
    Build the job description dict for a UNICORE/X-8 submission.

    :param app_logger: logger for trace/debug output
    :param uuidcode: correlation id for log tracing
    :param request_json: request payload; keys used: 'Environment', 'system',
                         'partition', 'reservation', 'Resources'
    :param project: the user's project / account string
    :param unicore_input: list of dicts with 'To' and 'Data' keys, turned
                          into inline Imports
    :param escapedusername: username substituted into the notification url
    :return: the UNICORE job description dict
    """
    app_logger.debug("uuidcode={} - Create UNICORE/X-8 Job.".format(uuidcode))
    env_list = [
        '{}={}'.format(key, value)
        for key, value in request_json.get('Environment', {}).items()
    ]
    job = {
        'ApplicationName': 'Bash shell',
        'Environment': env_list,
        'Imports': []
    }
    unicorex_info = utils_file_loads.get_unicorex()
    # Hoist the repeated lookups: the original re-evaluated
    # request_json.get('system').upper() and the nested .get('projects', {})
    # chains on nearly every branch below.
    system = request_json.get('system').upper()
    system_info = unicorex_info.get(system, {})
    projects = system_info.get('projects', {})
    if system_info.get('set_project', False):
        if projects.get('ALL', '') != '':
            # A configured catch-all project overrides the user's project.
            job['Project'] = projects.get('ALL', '')
        elif projects.get(project.lower(), '') != '':
            job['Project'] = projects.get(project.lower(), '')
        elif system_info.get('projects_truncate', False):
            # Drop the first character (system-specific naming convention).
            job['Project'] = project[1:]
        else:
            job['Project'] = project
    for inp in unicore_input:
        job['Imports'].append({
            "From": "inline://dummy",
            "To": inp.get('To'),
            "Data": inp.get('Data')
        })
    urls = utils_file_loads.get_urls()
    ux_notify = urls.get('hub', {}).get(
        'url_ux', '<no_url_for_unicore_notification_configured>')
    ux_notify_server_name = "{}_{}_{}".format(
        len(uuidcode), uuidcode,
        request_json.get('Environment', {}).get('JUPYTERHUB_SERVER_NAME'))
    ux_notify = ux_notify.replace('<user>', escapedusername).replace(
        '<server>', ux_notify_server_name)
    job['Notification'] = ux_notify
    if request_json.get('partition') in ['LoginNode', 'LoginNodeVis']:
        # Interactive job on a login node — no batch Resources section.
        job['Executable'] = '/bin/bash'
        job['Arguments'] = ['.start.sh']
        job['Job type'] = 'interactive'
        if request_json.get('partition') in ['LoginNodeVis']:
            nodes = system_info.get('LoginNodeVis', [])
            if len(nodes) > 0:
                # get system list ... choose one ... use it
                node = random.choice(nodes)
                app_logger.trace(
                    "uuidcode={} - Use random VIS Node: {}".format(
                        uuidcode, node))
                job['Login node'] = node
        elif 'LoginNodeVis' in system_info.keys():
            # this system supports vis nodes. So we have to set the non vis
            # nodes explicitly
            nodes = system_info.get('LoginNode', [])
            if len(nodes) > 0:
                # get system list ... choose one ... use it
                node = random.choice(nodes)
                app_logger.trace(
                    "uuidcode={} - Use random non-VIS Node: {}".format(
                        uuidcode, node))
                job['Login node'] = node
        app_logger.trace("uuidcode={} - UNICORE/X Job: {}".format(
            uuidcode, job))
        return job
    if system_info.get('queues', False):
        job['Resources'] = {'Queue': request_json.get('partition')}
    else:
        job['Resources'] = {}
    if request_json.get('reservation', None):
        if len(request_json.get('reservation', '')) > 0 and request_json.get(
                'reservation', 'none').lower() != 'none':
            job['Resources']['Reservation'] = request_json.get('reservation')
    for key, value in request_json.get('Resources').items():
        job['Resources'][key] = value
    job['Executable'] = '/bin/bash'
    job['Arguments'] = ['.start.sh']
    app_logger.debug("uuidcode={} - UNICORE/X-8 Job: {}".format(uuidcode, job))
    return job
def start_docker_new(app_logger, uuidcode, app_database, request_headers, port,
                     service, dashboard, environment, app_urls):
    """
    Start a JupyterLab / Dashboard container via the DockerMaster and open
    an ssh tunnel to it.

    Headers sent to the DockerMaster:
        Intern-Authorization, uuidcode, accesstoken, expire, servicelevel
    Body:
        servername, service, dashboard, email, environments, image, port,
        jupyterhub_api_url

    :param app_logger: logger for debug/error/exception output
    :param uuidcode: correlation id; also used as tunnel hostname
    :param app_database: database handle, passed through to helpers
    :param request_headers: incoming request headers (servername, account,
                            jhubtoken, token/refresh urls, ...)
    :param port: container port to expose and tunnel to
    :param service: "JupyterLab" or "Dashboard"
    :param dashboard: dashboard name (used when service == "Dashboard")
    :param environment: env dict for the container; mutated (HPCACCOUNTS)
    :param app_urls: url configuration dict (tunnel/hub/dockermaster urls)
    :return: True when the container started and the tunnel is up, else False
    """
    servername = request_headers.get('servername')
    account = request_headers.get('account')
    jhubtoken = request_headers.get('jhubtoken')
    servicelevel = request_headers.get('servicelevel', 'default')
    app_tunnel_url = app_urls.get('tunnel', {}).get('url_tunnel')
    app_tunnel_url_remote = app_urls.get('tunnel', {}).get('url_remote')
    servername_at = servername.replace('@', '_at_')
    email = servername_at.split(':')[0]
    servername_short = servername_at.split(':')[1]
    # Users should be allowed to use their access tokens in the JupyterLab.
    # To get a maximum lifespan of this token for the user, we renew it
    # before we're starting the JupyterLab.
    accesstoken, expire = renew_token(
        app_logger, uuidcode, request_headers.get('tokenurl'),
        request_headers.get('authorizeurl'),
        request_headers.get('refreshtoken'),
        request_headers.get('accesstoken'), request_headers.get('expire'),
        jhubtoken, app_urls.get('hub', {}).get('url_proxy_route'),
        app_urls.get('hub', {}).get('url_token'),
        request_headers.get('escapedusername'), servername, app_database,
        True)
    dashboards = {}
    if service == "JupyterLab":
        dockerimage = utils_file_loads.image_name_to_image(account)
    elif service == "Dashboard":
        dashboards = utils_file_loads.get_dashboards()
        dockerimage = dashboards.get(dashboard, {}).get("image")
    else:
        # Unknown service types fall back to the JupyterLab image lookup.
        dockerimage = utils_file_loads.image_name_to_image(account)
    app_logger.debug("uuidcode={} - Add server to database: {}".format(
        uuidcode, servername_at))
    utils_db.create_entry_docker(app_logger, uuidcode, app_database,
                                 servername, jhubtoken, port, dockerimage)
    docker_master_token = utils_file_loads.get_docker_master_token()
    environment["HPCACCOUNTS"] = get_hpc_accounts(
        app_logger, uuidcode, environment.get('hpcaccounts', []))
    urls = utils_file_loads.get_urls()
    url = urls.get('dockermaster', {}).get('url_jlab', '<no_jlab_url_defined>')
    headers = {
        "Intern-Authorization": docker_master_token,
        "uuidcode": uuidcode,
        "accesstoken": accesstoken,
        "expire": str(expire),
        "servicelevel": servicelevel
    }
    body = {
        "servername": servername_short,
        "service": service,
        "dashboard": dashboard,
        "email": email,
        "environments": environment,
        "image": dockerimage,
        "port": port,
        "jupyterhub_api_url": environment.get('JUPYTERHUB_API_URL',
                                              'http://j4j_proxy:8000/hub/api')
    }
    with closing(
            requests.post(url, headers=headers, json=body, verify=False,
                          timeout=30)) as r:
        if r.status_code == 200:
            app_logger.debug(
                "uuidcode={} - DockerMaster response: Positive".format(
                    uuidcode))
            tunnel_header = {
                'Intern-Authorization':
                utils_file_loads.get_j4j_tunnel_token(),
                'uuidcode': uuidcode
            }
            tunnel_data = {
                'account': servername,
                'system': "hdfcloud",
                'hostname': uuidcode,
                'port': port
            }
            hdfcloud = get_hdfcloud()
            nodes = hdfcloud.get('nodes', [])
            node = tunnel_communication.get_remote_node(
                app_logger, uuidcode, app_tunnel_url_remote, nodes)
            app_logger.debug("uuidcode={} - Use {} as node for tunnel".format(
                uuidcode, node))
            # Retry the tunnel start up to 10 times with a 3s backoff.
            for i in range(0, 10):
                try:
                    tunnel_communication.j4j_start_tunnel(
                        app_logger, uuidcode, app_tunnel_url, tunnel_header,
                        tunnel_data)
                except Exception:
                    # Narrowed from a bare `except:`.
                    if i == 9:
                        app_logger.exception(
                            "uuidcode={} - Could not start Tunnel for HDF-Cloud JupyterLab: {}"
                            .format(uuidcode, uuidcode))
                        return False
                    sleep(3)
                else:
                    # Success: stop retrying. The `break` lives in the `else`
                    # clause so a failed attempt keeps looping (previously a
                    # loop-level break ended the retries after one failure).
                    break
            return True
        elif r.status_code == 501:
            app_logger.debug(
                "uuidcode={} - DockerMaster response: Negative".format(
                    uuidcode))
            return False
        else:
            app_logger.error(
                "uuidcode={} - DockerMaster unknown response: {} {}".format(
                    uuidcode, r.status_code, r.text))
            return False