def stop_interactive_session(project_uuid, pipeline_uuid) -> bool:
    """Stops an interactive session.

    Args:
        project_uuid: UUID of the project the session belongs to.
        pipeline_uuid: UUID of the pipeline the session belongs to.

    Returns:
        True if the session was stopped, False if no session was found.
    """
    session = models.InteractiveSession.query.filter_by(
        project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
    ).one_or_none()
    if session is None:
        return False

    session.status = "STOPPING"
    db.session.commit()

    session_obj = InteractiveSession.from_container_IDs(
        docker_client,
        container_IDs=session.container_ids,
        network="orchest",
        notebook_server_info=session.notebook_server_info,
    )

    # TODO: error handling?
    session_obj.shutdown()

    db.session.delete(session)
    db.session.commit()

    return True

def _collateral(self, project_uuid: str, pipeline_uuid: str):
    # The uuids can be None when the _transaction call sets them to
    # None because there is no session to shut down. This is how the
    # _transaction function effectively tells the _collateral function
    # not to run.
    if not project_uuid or not pipeline_uuid:
        return

    session = models.InteractiveSession.query.filter_by(
        project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
    ).one_or_none()
    if session is None:
        return

    session_obj = InteractiveSession.from_container_IDs(
        docker_client,
        container_IDs=session.container_ids,
        network=_config.DOCKER_NETWORK,
        notebook_server_info=session.notebook_server_info,
    )

    # TODO: error handling?
    # TODO: If we can do this task in the background, the request can
    # return early. The session shutting down should not depend on the
    # existence of the object in the DB. We need to make sure this is
    # indeed possible, e.g. that there are no race conditions when
    # shutting down and starting another interactive session at the
    # same time.
    session_obj.shutdown()

    db.session.delete(session)
    db.session.commit()

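# A minimal sketch of the companion _transaction for the stop
# _collateral above. It assumes the two-phase base class forwards a
# `self.collateral_kwargs` dict to _collateral and commits the
# transaction itself; neither mechanism is shown in the snippet, so
# treat both as assumptions rather than the project's actual API.
def _transaction(self, project_uuid: str, pipeline_uuid: str):
    session = models.InteractiveSession.query.filter_by(
        project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
    ).one_or_none()
    if session is None:
        # No session to shut down: pass None so _collateral is a no-op.
        self.collateral_kwargs["project_uuid"] = None
        self.collateral_kwargs["pipeline_uuid"] = None
        return

    # Mark the session as STOPPING right away so the user sees the
    # state change; the actual shutdown happens in _collateral.
    session.status = "STOPPING"
    self.collateral_kwargs["project_uuid"] = project_uuid
    self.collateral_kwargs["pipeline_uuid"] = pipeline_uuid
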
def _collateral(
    self,
    project_uuid: str,
    pipeline_uuid: str,
    pipeline_path: str,
    project_dir: str,
    host_userdir: str,
):
    session = InteractiveSession(docker_client, network=_config.DOCKER_NETWORK)
    session.launch(
        pipeline_uuid,
        project_uuid,
        pipeline_path,
        project_dir,
        host_userdir,
    )

    # Update the database entry with information to connect to the
    # launched resources.
    IP = session.get_containers_IP()
    status = {
        "status": "RUNNING",
        "container_ids": session.get_container_IDs(),
        "jupyter_server_ip": IP.jupyter_server,
        "notebook_server_info": session.notebook_server_info,
    }
    models.InteractiveSession.query.filter_by(
        project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
    ).update(status)
    db.session.commit()

def _background_session_start(
    cls,
    app,
    project_uuid: str,
    pipeline_uuid: str,
    pipeline_path: str,
    project_dir: str,
    host_userdir: str,
):
    with app.app_context():
        try:
            session = InteractiveSession(
                docker_client, network=_config.DOCKER_NETWORK
            )
            session.launch(
                pipeline_uuid,
                project_uuid,
                pipeline_path,
                project_dir,
                host_userdir,
            )

            # Update the database entry with information to connect
            # to the launched resources.
            IP = session.get_containers_IP()

            # Use with_for_update() to avoid overwriting the state of
            # a STOPPING instance.
            session_entry = (
                models.InteractiveSession.query.with_for_update()
                .populate_existing()
                .filter_by(
                    project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
                )
                .one_or_none()
            )
            if session_entry is None:
                return

            session_entry.container_ids = session.get_container_IDs()
            session_entry.jupyter_server_ip = IP.jupyter_server
            session_entry.notebook_server_info = session.notebook_server_info

            # Do not overwrite the STOPPING status if the session is
            # stopping.
            if session_entry.status == "LAUNCHING":
                session_entry.status = "RUNNING"

            db.session.commit()
        except Exception as e:
            current_app.logger.error(e)

            # Error handling. If the launch does not succeed, the
            # initial entry has to be removed from the database, as
            # otherwise no session can be started in the future due
            # to the uniqueness constraint.
            models.InteractiveSession.query.filter_by(
                project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
            ).delete()
            db.session.commit()

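# A minimal sketch of how a request-side _collateral might hand off to
# _background_session_start. It assumes the function is decorated with
# @classmethod and that a plain thread is the chosen mechanism; both
# are assumptions, as is the use of Flask's current_app here.
import threading

from flask import current_app


def _collateral(self, project_uuid: str, pipeline_uuid: str,
                pipeline_path: str, project_dir: str, host_userdir: str):
    # current_app is only a proxy; pass the real application object so
    # the background thread can open its own app context.
    app = current_app._get_current_object()
    threading.Thread(
        target=self._background_session_start,
        args=(app, project_uuid, pipeline_uuid,
              pipeline_path, project_dir, host_userdir),
    ).start()
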
def put(self, pipeline_uuid):
    """Restarts the memory-server of the session."""
    session = models.InteractiveSession.query.get_or_404(
        pipeline_uuid, description='Session not found')

    session_obj = InteractiveSession.from_container_IDs(
        docker_client,
        container_IDs=session.container_ids,
        network='orchest',
    )

    # Note: The entry in the database does not have to be updated
    # since restarting the `memory-server` does not change its
    # Docker ID.
    session_obj.restart_resource(resource_name='memory-server')

    return {'message': 'Session restart was successful'}, 200

def put(self, project_uuid, pipeline_uuid):
    """Restarts the memory-server of the session."""
    session = models.InteractiveSession.query.get_or_404(
        ident=(project_uuid, pipeline_uuid), description="Session not found")

    session_obj = InteractiveSession.from_container_IDs(
        docker_client,
        container_IDs=session.container_ids,
        network="orchest",
        notebook_server_info=session.notebook_server_info,
    )

    # Note: The entry in the database does not have to be updated
    # since restarting the `memory-server` does not change its
    # Docker ID.
    session_obj.restart_resource(resource_name="memory-server")

    return {"message": "Session restart was successful"}, 200

def post(self):
    """Launches an interactive session."""
    post_data = request.get_json()

    # TODO: error handling. If it does not succeed then the initial
    # entry has to be removed from the database as otherwise no
    # session can be started in the future due to unique constraint.

    # Add initial entry to database.
    pipeline_uuid = post_data["pipeline_uuid"]
    pipeline_path = post_data["pipeline_path"]
    project_uuid = post_data["project_uuid"]

    interactive_session = {
        "project_uuid": project_uuid,
        "pipeline_uuid": pipeline_uuid,
        "status": "LAUNCHING",
    }
    db.session.add(models.InteractiveSession(**interactive_session))
    db.session.commit()

    session = InteractiveSession(docker_client, network="orchest")
    session.launch(
        pipeline_uuid,
        project_uuid,
        pipeline_path,
        post_data["project_dir"],
        post_data["settings"]["data_passing_memory_size"],
        post_data["host_userdir"],
    )

    # Update the database entry with information to connect to the
    # launched resources.
    IP = session.get_containers_IP()
    interactive_session.update(
        {
            "status": "RUNNING",
            "container_ids": session.get_container_IDs(),
            "jupyter_server_ip": IP.jupyter_server,
            "notebook_server_info": session.notebook_server_info,
        }
    )
    models.InteractiveSession.query.filter_by(
        project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
    ).update(interactive_session)
    db.session.commit()

    return interactive_session, 201

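# A minimal usage sketch for the POST endpoint above. The payload keys
# mirror what is read from post_data; the URL, uuids, paths, and memory
# size are placeholders, not values taken from the project.
import requests

payload = {
    "project_uuid": "<project-uuid>",
    "pipeline_uuid": "<pipeline-uuid>",
    "pipeline_path": "pipeline.orchest",
    "project_dir": "/userdir/projects/my-project",
    "host_userdir": "/home/user/.orchest/userdir",
    "settings": {"data_passing_memory_size": "1GB"},
}
resp = requests.post("http://localhost/api/sessions/", json=payload)
print(resp.status_code, resp.json())
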
def _collateral(
    self,
    container_ids: Dict[str, str],
    notebook_server_info: Dict[str, str] = None,
):
    if container_ids is None:
        return

    session_obj = InteractiveSession.from_container_IDs(
        docker_client,
        container_IDs=container_ids,
        network=_config.DOCKER_NETWORK,
        notebook_server_info=notebook_server_info,
    )

    # Note: The entry in the database does not have to be updated
    # since restarting the `memory-server` does not change its
    # Docker ID.
    session_obj.restart_resource(resource_name="memory-server")

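# A minimal sketch of the companion _transaction for the restart
# _collateral above, again assuming a `self.collateral_kwargs` hand-off
# (an assumed mechanism, not shown in the snippet).
def _transaction(self, project_uuid: str, pipeline_uuid: str):
    session = models.InteractiveSession.query.filter_by(
        project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
    ).one_or_none()
    if session is None:
        # Nothing to restart: _collateral returns early on None ids.
        self.collateral_kwargs["container_ids"] = None
        self.collateral_kwargs["notebook_server_info"] = None
        return

    self.collateral_kwargs["container_ids"] = session.container_ids
    self.collateral_kwargs["notebook_server_info"] = session.notebook_server_info
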
def delete(self, pipeline_uuid):
    """Shuts down the session."""
    session = models.InteractiveSession.query.get_or_404(
        pipeline_uuid, description='Session not found')

    session.status = 'STOPPING'
    db.session.commit()

    session_obj = InteractiveSession.from_container_IDs(
        docker_client,
        container_IDs=session.container_ids,
        network='orchest',
    )

    # TODO: error handling?
    session_obj.shutdown()

    db.session.delete(session)
    db.session.commit()

    return {'message': 'Session shutdown was successful'}, 200

def delete(self, project_uuid, pipeline_uuid):
    """Shuts down the session."""
    session = models.InteractiveSession.query.get_or_404(
        ident=(project_uuid, pipeline_uuid), description="Session not found")

    session.status = "STOPPING"
    db.session.commit()

    session_obj = InteractiveSession.from_container_IDs(
        docker_client,
        container_IDs=session.container_ids,
        network="orchest",
    )

    # TODO: error handling?
    session_obj.shutdown()

    db.session.delete(session)
    db.session.commit()

    return {"message": "Session shutdown was successful"}, 200

def _background_session_start(
    cls,
    app,
    project_uuid: str,
    pipeline_uuid: str,
    pipeline_path: str,
    project_dir: str,
    host_userdir: str,
):
    with app.app_context():
        try:
            session = InteractiveSession(
                docker_client, network=_config.DOCKER_NETWORK
            )
            session.launch(
                pipeline_uuid,
                project_uuid,
                pipeline_path,
                project_dir,
                host_userdir,
            )

            # Update the database entry with information to connect
            # to the launched resources.
            IP = session.get_containers_IP()
            status = {
                "status": "RUNNING",
                "container_ids": session.get_container_IDs(),
                "jupyter_server_ip": IP.jupyter_server,
                "notebook_server_info": session.notebook_server_info,
            }
            models.InteractiveSession.query.filter_by(
                project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
            ).update(status)
            db.session.commit()
        except Exception as e:
            current_app.logger.error(e)

            # Error handling. If it does not succeed then the
            # initial entry has to be removed from the database as
            # otherwise no session can be started in the future due
            # to the uniqueness constraint.
            models.InteractiveSession.query.filter_by(
                project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
            ).delete()
            db.session.commit()

def _background_session_stop(
    cls,
    app,
    project_uuid: str,
    pipeline_uuid: str,
    container_ids: Dict[str, str],
    notebook_server_info: Dict[str, str] = None,
):
    with app.app_context():
        try:
            session_obj = InteractiveSession.from_container_IDs(
                docker_client,
                container_IDs=container_ids,
                network=_config.DOCKER_NETWORK,
                notebook_server_info=notebook_server_info,
            )

            # TODO: error handling?
            session_obj.shutdown()

            # Deletion happens here and not in the transactional
            # phase because this way we can show the session
            # STOPPING to the user.
            models.InteractiveSession.query.filter_by(
                project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
            ).delete()
            db.session.commit()
        except Exception as e:
            current_app.logger.error(e)

            # Make sure that the session is deleted in any case,
            # because otherwise the user will not be able to have an
            # active session for the given pipeline.
            session = models.InteractiveSession.query.filter_by(
                project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
            ).one()
            db.session.delete(session)
            db.session.commit()

def post(self):
    """Launches an interactive session."""
    post_data = request.get_json()

    # TODO: error handling. If it does not succeed then the initial
    # entry has to be removed from the database as otherwise no
    # session can be started in the future due to unique constraint.

    # Add initial entry to database.
    pipeline_uuid = post_data['pipeline_uuid']
    interactive_session = {
        'pipeline_uuid': pipeline_uuid,
        'status': 'LAUNCHING',
    }
    db.session.add(models.InteractiveSession(**interactive_session))
    db.session.commit()

    session = InteractiveSession(docker_client, network='orchest')
    session.launch(pipeline_uuid,
                   post_data['pipeline_dir'],
                   post_data['host_userdir'])

    # Update the database entry with information to connect to the
    # launched resources.
    IP = session.get_containers_IP()
    interactive_session.update({
        'status': 'RUNNING',
        'container_ids': session.get_container_IDs(),
        'jupyter_server_ip': IP.jupyter_server,
        'notebook_server_info': session.notebook_server_info,
    })
    models.InteractiveSession.query \
        .filter_by(pipeline_uuid=pipeline_uuid) \
        .update(interactive_session)
    db.session.commit()

    return interactive_session, 201

def _background_session_stop(
    cls,
    app,
    project_uuid: str,
    pipeline_uuid: str,
    container_ids: Dict[str, str],
    notebook_server_info: Dict[str, str],
    previous_state: str,
):
    # Note that a session that is still LAUNCHING should not be
    # killed until it is done launching, because the jupyterlab
    # user configuration is managed through a lock that is removed
    # by the jupyterlab start script. See PR #254.
    with app.app_context():
        try:
            # Wait for the session to be STARTED before killing it.
            if previous_state == "LAUNCHING":
                n = 600
                for _ in range(n):
                    session = models.InteractiveSession.query.filter_by(
                        project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
                    ).one_or_none()

                    # The session has been deleted because the
                    # launch failed or because of another failure
                    # reason.
                    if session is None:
                        return

                    # We have to rely on the container ids and not
                    # on status because a session that is STOPPED
                    # while LAUNCHING will never reach a RUNNING
                    # state because the background task will
                    # explicitly avoid doing so.
                    if session.container_ids is not None:
                        container_ids = session.container_ids
                        notebook_server_info = session.notebook_server_info
                        break

                    # Otherwise we will get an old version of
                    # the session data.
                    db.session.close()
                    time.sleep(1)

            session_obj = InteractiveSession.from_container_IDs(
                docker_client,
                container_IDs=container_ids,
                network=_config.DOCKER_NETWORK,
                notebook_server_info=notebook_server_info,
            )

            # TODO: error handling?
            session_obj.shutdown()

            # Deletion happens here and not in the transactional
            # phase because this way we can show the session
            # STOPPING to the user.
            models.InteractiveSession.query.filter_by(
                project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
            ).delete()
            db.session.commit()
        except Exception as e:
            current_app.logger.error(e)

            # Make sure that the session is deleted in any case,
            # because otherwise the user will not be able to have an
            # active session for the given pipeline.
            session = models.InteractiveSession.query.filter_by(
                project_uuid=project_uuid, pipeline_uuid=pipeline_uuid
            ).one()
            db.session.delete(session)
            db.session.commit()

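# A minimal sketch of handing off to _background_session_stop with the
# session's previous state. As with the start hand-off, it assumes a
# @classmethod, a plain thread, and that previous_state, container_ids
# and notebook_server_info were captured in the transactional phase;
# none of that wiring is shown in the snippet above.
import threading

from flask import current_app


def _collateral(self, project_uuid: str, pipeline_uuid: str,
                container_ids, notebook_server_info, previous_state: str):
    app = current_app._get_current_object()
    threading.Thread(
        target=self._background_session_stop,
        args=(app, project_uuid, pipeline_uuid, container_ids,
              notebook_server_info, previous_state),
    ).start()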