def insert_data_cell(doc_id, index, cell_type, context=None, meta=None, sid="", session=None):
    """Create a new data cell and insert it into a doc at ``index``.

    Broadcasts a ``data_cell_inserted`` event to everyone in the doc's room
    and returns the new cell serialized as a dict.
    """
    # Caller needs write access to the doc and data-doc permission
    assert_can_write(doc_id, session=session)
    verify_data_doc_permission(doc_id, session=session)

    new_cell = logic.create_data_cell(
        cell_type=cell_type,
        context=context,
        meta=meta,
        commit=False,
        session=session,
    )
    logic.insert_data_doc_cell(
        data_doc_id=doc_id, cell_id=new_cell.id, index=index, session=session
    )

    serialized_cell = new_cell.to_dict()
    socketio.emit(
        "data_cell_inserted",
        (sid, index, serialized_cell),
        namespace=DATA_DOC_NAMESPACE,
        room=doc_id,
        broadcast=True,
    )
    return serialized_cell
def update_user_list(data_doc_id, add=False, redis_conn=None):
    """Add/Remove user in data doc's redis map

    Arguments:
        data_doc_id {[number]} -- [Id of data doc]

    Keyword Arguments:
        add {bool} -- True to register the current socket, False to remove it
                      (default: {False})
    """
    # Redis hash holding every socket currently viewing this doc
    key = f"data_doc/{data_doc_id}/users"

    if add:
        # hset returns 1 only when the field is newly created, so we only
        # broadcast the first time this socket joins the room
        joined_at = int(time.time())
        notify = (
            redis_conn.hset(key, request.sid, f"{current_user.id}|{joined_at}") == 1
        )
    else:
        redis_conn.hdel(key, request.sid)
        notify = True

    if notify:
        socketio.emit(
            "data_doc_user",
            (add, request.sid, current_user.id),
            namespace=DATA_DOC_NAMESPACE,
            room=data_doc_id,
            broadcast=True,
        )
def __init__(self, query_execution_id, celery_task, query, statement_ranges):
    """Bind to a query execution, persist its celery task id, and notify clients.

    Emits a ``query_received`` event to the execution's room once the task id
    is stored.
    """
    self._query_execution_id = query_execution_id
    self._celery_task = celery_task
    self._task_id = celery_task.request.id
    self.statement_execution_ids = []
    self._query = query
    self._statement_ranges = statement_ranges

    # Logging state for the currently running statement
    self._has_log = False
    self._log_cache = ""  # [statement_logs]
    self._meta_info = None  # statement_urls
    self._percent_complete = 0  # percent_complete
    self._statement_progress = {}

    # Connect to mysql db and record the task id so the execution can be
    # tracked / cancelled later
    with DBSession() as session:
        execution_dict = qe_logic.update_query_execution(
            self._query_execution_id, task_id=self._task_id, session=session
        ).to_dict()

        # Emit a event from socketio
        socketio.emit(
            "query_received",
            execution_dict,
            namespace=QUERY_EXECUTION_NAMESPACE,
            room=self._query_execution_id,
        )
def on_cancel(self):
    """Mark the running statement (if any) and the whole query as cancelled."""
    cancelled_at = datetime.datetime.utcnow()

    # Finalize whichever statement was executing when the cancel arrived
    if self.statement_execution_ids:
        last_statement_id = self.statement_execution_ids[-1]
        log_path, log_uploaded = self._upload_log(last_statement_id)
        qe_logic.update_statement_execution(
            last_statement_id,
            status=StatementExecutionStatus.CANCEL,
            completed_at=cancelled_at,
            has_log=self._has_log,
            log_path=log_path if log_uploaded else None,
        )

    with DBSession() as session:
        execution_dict = qe_logic.update_query_execution(
            self._query_execution_id,
            status=QueryExecutionStatus.CANCEL,
            completed_at=cancelled_at,
            session=session,
        ).to_dict()
        socketio.emit(
            "query_cancel",
            execution_dict,
            namespace=QUERY_EXECUTION_NAMESPACE,
            room=self._query_execution_id,
        )
def update_datadoc_editor(
    id,
    write=None,
    read=None,
    originator=None,  # Used for websocket to identify sender, optional
):
    """Change an editor's read/write permission and broadcast the change.

    Returns the updated editor dict, or None when no editor was updated.
    """
    with DBSession() as session:
        existing = logic.get_data_doc_editor_by_id(id, session=session)
        if existing:
            assert_can_write(existing.data_doc_id, session=session)

        updated = logic.update_data_doc_editor(id, read, write, session=session)
        if updated:
            updated_dict = updated.to_dict()
            socketio.emit(
                "data_doc_editor",
                (
                    originator,
                    updated_dict["data_doc_id"],
                    updated_dict["uid"],
                    updated_dict,
                ),
                namespace="/datadoc",
                room=updated_dict["data_doc_id"],
                broadcast=True,
            )
            return updated_dict
def move_data_doc_cursor(data_doc_id, data_cell_id=None):
    """Record the current user's cursor position and broadcast it to the doc room."""
    update_user_cursor(data_doc_id, data_cell_id)
    socketio.emit(
        "data_doc_cursor_moved",
        (request.sid, data_cell_id),
        namespace=DATA_DOC_NAMESPACE,
        room=data_doc_id,
        broadcast=True,
    )
def remove_datadoc_access_request(doc_id, uid, originator=None):
    """Delete a pending access request for ``uid`` and notify the doc room."""
    assert_can_write(doc_id)
    logic.remove_datadoc_access_request(doc_id=doc_id, uid=uid)
    # None in the payload signals the request was removed
    socketio.emit(
        "data_doc_access_request",
        (originator, doc_id, uid, None),
        namespace="/datadoc",
        room=doc_id,
        broadcast=True,
    )
def create_query_execution(query, engine_id, data_cell_id=None, originator=None):
    """Create a query execution row, attach it to a data cell (optional), and
    kick off the async run task.

    Returns the new execution as a dict. If scheduling the celery task fails
    (e.g. Redis is down), the execution is marked ERROR with an error record
    and the exception is re-raised.
    """
    with DBSession() as session:
        verify_query_engine_permission(engine_id, session=session)
        uid = current_user.id
        query_execution = logic.create_query_execution(
            query=query, engine_id=engine_id, uid=uid, session=session
        )

        data_doc = None
        if data_cell_id:
            # Link the execution to the cell so it shows in the cell's history
            datadoc_logic.append_query_executions_to_data_cell(
                data_cell_id, [query_execution.id], session=session
            )
            data_cell = datadoc_logic.get_data_cell_by_id(data_cell_id, session=session)
            data_doc = data_cell.doc

        try:
            # Schedule the actual query run on celery
            run_query_task.apply_async(
                args=[
                    query_execution.id,
                ]
            )
            query_execution_dict = query_execution.to_dict()

            if data_doc:
                # Tell everyone viewing the doc that a new execution started
                socketio.emit(
                    "data_doc_query_execution",
                    (
                        originator,
                        query_execution_dict,
                        data_cell_id,
                    ),
                    namespace="/datadoc",
                    room=data_doc.id,
                    broadcast=True,
                )

            return query_execution_dict
        except Exception as e:
            # We might encounter ConnectionError caused by
            # Redis connection failing
            logic.create_query_execution_error(
                query_execution.id,
                error_type=None,
                error_message_extracted="Encountered Error connecting to Redis",
                error_message=str(e),
                commit=False,
                session=session,
            )
            query_execution.status = QueryExecutionStatus.ERROR
            session.commit()
            raise e
def on_statement_update(
    self, log: str = "", meta_info: str = None, percent_complete=None,
):
    """Persist incremental statement progress (meta info, logs, percent
    complete) and broadcast a ``statement_update`` event when anything changed.
    """
    statement_execution_id = self.statement_execution_ids[-1]

    meta_changed = meta_info is not None and self._meta_info != meta_info
    if meta_changed:
        self._meta_info = meta_info
        qe_logic.update_statement_execution(
            statement_execution_id, meta_info=meta_info
        )

    log_received = bool(log)
    if log_received:
        self._stream_log(statement_execution_id, log)

    percent_changed = (
        percent_complete is not None and self._percent_complete != percent_complete
    )
    if percent_changed:
        self._percent_complete = percent_complete

    # Nothing new to tell listeners about
    if not (meta_changed or log_received or percent_changed):
        return

    update_payload = {
        "query_execution_id": self._query_execution_id,
        "id": statement_execution_id,
    }
    if meta_changed:
        update_payload["meta_info"] = meta_info
    if log_received:
        update_payload["log"] = [log]
    if percent_changed:
        update_payload["percent_complete"] = percent_complete
        self._statement_progress = {
            statement_execution_id: {
                "percent_complete": percent_complete,
            }
        }
        self.update_progress()

    socketio.emit(
        "statement_update",
        update_payload,
        namespace=QUERY_EXECUTION_NAMESPACE,
        room=self._query_execution_id,
    )
def send_data_doc_session_info(data_doc_id, room, redis_conn=None):
    """Emit the current viewers and their cursor positions for a data doc."""
    users = get_and_expire_user_dict(data_doc_id, redis_conn=redis_conn)
    cursors = get_and_expire_cursor_dict(data_doc_id, users, redis_conn=redis_conn)
    socketio.emit(
        "data_doc_sessions",
        {"users": users, "cursors": cursors},
        namespace=DATA_DOC_NAMESPACE,
        room=room,
        broadcast=True,
    )
def fetch_data_doc_editors(doc_id):
    """Send the doc's editor list (plus session info) back to the requester only."""
    with DBSession() as session:
        doc = datadoc_collab.get_datadoc(doc_id, session=session)
        if doc:
            editor_dicts = [
                editor.to_dict()
                for editor in logic.get_data_doc_editors_by_doc_id(
                    doc_id, session=session
                )
            ]
            # Reply only to the requesting socket, not the whole room
            socketio.emit(
                "data_doc_editors",
                (request.sid, editor_dicts),
                namespace=DATA_DOC_NAMESPACE,
                broadcast=False,
                room=request.sid,
            )
            send_data_doc_session_info(doc_id, room=request.sid)
def handler(*args, **kwargs):
    """Invoke the wrapped fn, enforcing auth and reporting failures to the sender."""
    if require_auth and not current_user.is_authenticated:
        flask.abort(401)
    try:
        fn(*args, **kwargs)
    except Exception as e:
        LOG.error(e, exc_info=True)
        # Report the failure only to the socket that triggered it
        socketio.emit(
            "error",
            str(e),
            namespace=namespace,
            broadcast=False,
            room=flask.request.sid,
        )
def on_query_end(self):
    """Mark the query execution DONE and broadcast the final state."""
    with DBSession() as session:
        execution_dict = qe_logic.update_query_execution(
            self._query_execution_id,
            status=QueryExecutionStatus.DONE,
            completed_at=datetime.datetime.utcnow(),
            session=session,
        ).to_dict()
        socketio.emit(
            "query_end",
            execution_dict,
            namespace=QUERY_EXECUTION_NAMESPACE,
            room=self._query_execution_id,
        )
def handler(*args, **kwargs):
    """Run the wrapped fn for authenticated users; disconnect anonymous sockets."""
    if not current_user.is_authenticated:
        LOG.error("Unauthorized websocket access")
        disconnect()
        return
    try:
        fn(*args, **kwargs)
    except Exception as e:
        LOG.error(e, exc_info=True)
        # Send the error back to the originating socket only
        socketio.emit(
            "error",
            str(e),
            namespace=namespace,
            broadcast=False,
            room=flask.request.sid,
        )
def add_datadoc_editor(
    doc_id,
    uid,
    read=None,
    write=None,
    originator=None,  # Used for websocket to identify sender, optional
):
    """Grant ``uid`` editor access to a data doc and broadcast the change.

    Also clears any pending access request from that user, refreshes the
    doc's elasticsearch entries, and emails the new editor. Returns the
    new editor as a dict.
    """
    with DBSession() as session:
        assert_can_write(doc_id, session=session)
        # NOTE(review): unlike other calls here, no session= is passed —
        # presumably create_data_doc_editor defaults to the ambient session;
        # confirm against logic.create_data_doc_editor's signature
        editor = logic.create_data_doc_editor(
            data_doc_id=doc_id, uid=uid, read=read, write=write, commit=False
        )
        editor_dict = editor.to_dict()

        # If the user previously requested access, the grant fulfils it
        access_request = logic.get_data_doc_access_request_by_doc_id(
            doc_id=doc_id, uid=uid
        )
        if access_request:
            logic.remove_datadoc_access_request(doc_id=doc_id, uid=uid, commit=False)

        # Commit the editor creation and request removal together
        session.commit()

        # Update queries in elasticsearch to reflect new permissions
        logic.update_es_queries_by_datadoc_id(doc_id, session=session)

        if access_request:
            # None payload tells clients the pending request is gone
            socketio.emit(
                "data_doc_access_request",
                (originator, doc_id, uid, None),
                namespace="/datadoc",
                room=doc_id,
                broadcast=True,
            )

        socketio.emit(
            "data_doc_editor",
            (originator, doc_id, uid, editor_dict),
            namespace="/datadoc",
            room=doc_id,
            broadcast=True,
        )

        logic.update_es_data_doc_by_id(doc_id)

        send_add_datadoc_editor_email(doc_id, uid, read, write)
        return editor_dict
def on_query_start(self):
    """Flag the execution as RUNNING and broadcast it with the statement count."""
    with DBSession() as session:
        execution_dict = qe_logic.update_query_execution(
            self._query_execution_id,
            status=QueryExecutionStatus.RUNNING,
            session=session,
        ).to_dict()
        # Attach the number of statements so clients can show x-of-n progress
        execution_dict = spread_dict(
            execution_dict,
            {
                "total": len(self._statement_ranges),
            },
        )
        socketio.emit(
            "query_start",
            execution_dict,
            namespace=QUERY_EXECUTION_NAMESPACE,
            room=self._query_execution_id,
        )
        self.update_progress()
def fetch_data_doc_access_requests(doc_id):
    """Send the doc's pending access requests (plus session info) to the requester."""
    with DBSession() as session:
        doc = datadoc_collab.get_datadoc(doc_id, session=session)
        if doc:
            request_dicts = [
                access_request.to_dict()
                for access_request in logic.get_data_doc_access_requests_by_doc_id(
                    doc_id, session=session
                )
            ]
            # Reply only to the requesting socket, not the whole room
            socketio.emit(
                "data_doc_access_requests",
                (request.sid, request_dicts),
                namespace=DATA_DOC_NAMESPACE,
                broadcast=False,
                room=request.sid,
            )
            send_data_doc_session_info(doc_id, room=request.sid)
def add_datadoc_access_request(doc_id, originator=None):
    """Create an access request for the current user if none is pending.

    Broadcasts the new request to the doc room and notifies the doc's
    editors. Returns the request dict, or None when one already existed.
    """
    uid = current_user.id
    new_request_dict = None
    existing_request = logic.get_data_doc_access_request_by_doc_id(
        doc_id=doc_id, uid=uid
    )
    if not existing_request:
        new_request = logic.create_data_doc_access_request(doc_id=doc_id, uid=uid)
        new_request_dict = new_request.to_dict()
        socketio.emit(
            "data_doc_access_request",
            (originator, doc_id, uid, new_request_dict),
            namespace="/datadoc",
            room=doc_id,
            broadcast=True,
        )
        send_datadoc_access_request_notification(doc_id=doc_id, uid=uid)
    return new_request_dict
def delete_datadoc_editor(
    id,
    originator=None,  # Used for websocket to identify sender, optional
):
    """Remove an editor from a data doc and broadcast the removal."""
    with DBSession() as session:
        editor = logic.get_data_doc_editor_by_id(id, session=session)
        if editor is None:
            return
        removed_dict = editor.to_dict()
        assert_can_write(editor.data_doc_id, session=session)
        logic.delete_data_doc_editor(
            id=id, doc_id=editor.data_doc_id, session=session
        )
        # None in place of the editor dict signals deletion to clients
        socketio.emit(
            "data_doc_editor",
            (originator, removed_dict["data_doc_id"], removed_dict["uid"], None),
            namespace="/datadoc",
            room=removed_dict["data_doc_id"],
            broadcast=True,
        )
def on_exception(self, error_type: int, error_str: str, error_extracted: str):
    """Record the failure on the running statement and the execution, then
    broadcast a ``query_exception`` event."""
    failed_at = datetime.datetime.utcnow()

    # Cap the extracted error message at 5000 chars
    if error_extracted is not None and len(error_extracted) > 5000:
        error_extracted = error_extracted[:5000]

    with DBSession() as session:
        # Finalize whichever statement was executing when the error occurred
        if self.statement_execution_ids:
            last_statement_id = self.statement_execution_ids[-1]
            log_path, log_uploaded = self._upload_log(last_statement_id)
            qe_logic.update_statement_execution(
                last_statement_id,
                status=StatementExecutionStatus.ERROR,
                completed_at=failed_at,
                has_log=self._has_log,
                log_path=log_path if log_uploaded else None,
                session=session,
            )

        qe_logic.create_query_execution_error(
            self._query_execution_id,
            error_type=error_type,
            error_message_extracted=error_extracted,
            error_message=error_str,
            session=session,
        )
        execution_dict = qe_logic.update_query_execution(
            self._query_execution_id,
            status=QueryExecutionStatus.ERROR,
            completed_at=failed_at,
            session=session,
        ).to_dict()
        socketio.emit(
            "query_exception",
            execution_dict,
            namespace=QUERY_EXECUTION_NAMESPACE,
            room=self._query_execution_id,
        )
def update_data_cell(cell_id, fields, sid="", session=None):
    """Apply field updates to a data cell and broadcast the result to its doc."""
    parent_doc = logic.get_data_doc_by_data_cell_id(cell_id, session=session)
    assert_can_write(parent_doc.id, session=session)
    verify_environment_permission([parent_doc.environment_id])

    updated_cell = logic.update_data_cell(id=cell_id, session=session, **fields)
    updated_dict = updated_cell.to_dict()
    socketio.emit(
        "data_cell_updated",
        (sid, updated_dict),
        namespace=DATA_DOC_NAMESPACE,
        room=parent_doc.id,
        broadcast=True,
    )
    return updated_dict
def delete_data_cell(doc_id, cell_id, sid="", session=None):
    """Remove a cell from a doc and broadcast the deletion. Returns True."""
    assert_can_write(doc_id, session=session)
    verify_data_doc_permission(doc_id, session=session)
    logic.delete_data_doc_cell(
        data_doc_id=doc_id, data_cell_id=int(cell_id), session=session
    )
    socketio.emit(
        "data_cell_deleted",
        (sid, cell_id),
        namespace=DATA_DOC_NAMESPACE,
        room=doc_id,
        broadcast=True,
    )
    return True
def on_statement_end(self, cursor):
    """Upload results and logs for the finished statement, then broadcast
    ``statement_end`` with the final record."""
    statement_execution_id = self.statement_execution_ids[-1]

    # Flip to UPLOADING first so clients can show an upload phase
    qe_logic.update_statement_execution(
        statement_execution_id,
        status=StatementExecutionStatus.UPLOADING,
    )
    socketio.emit(
        "statement_update",
        {
            "query_execution_id": self._query_execution_id,
            "id": statement_execution_id,
            "status": StatementExecutionStatus.UPLOADING.value,
        },
        namespace=QUERY_EXECUTION_NAMESPACE,
        room=self._query_execution_id,
    )

    result_path, result_row_count = self._upload_query_result(
        cursor, statement_execution_id
    )
    log_path, log_uploaded = self._upload_log(statement_execution_id)

    statement_dict = qe_logic.update_statement_execution(
        statement_execution_id,
        status=StatementExecutionStatus.DONE,
        completed_at=datetime.datetime.utcnow(),
        result_row_count=result_row_count,
        has_log=self._has_log,
        result_path=result_path,
        log_path=log_path if log_uploaded else None,
    ).to_dict()

    # Statement finished: clear its progress entry before notifying
    self._statement_progress = {}
    self.update_progress()
    socketio.emit(
        "statement_end",
        statement_dict,
        namespace=QUERY_EXECUTION_NAMESPACE,
        room=self._query_execution_id,
    )
def on_statement_start(self, statement_index):
    """Create a RUNNING statement-execution record and broadcast it."""
    self.reset_logging_variables()
    statement_start, statement_end = self._statement_ranges[statement_index]
    statement_dict = qe_logic.create_statement_execution(
        self._query_execution_id,
        statement_start,
        statement_end,
        StatementExecutionStatus.RUNNING,
    ).to_dict()
    self.statement_execution_ids.append(statement_dict["id"])
    socketio.emit(
        "statement_start",
        statement_dict,
        namespace=QUERY_EXECUTION_NAMESPACE,
        room=self._query_execution_id,
    )
def update_datadoc(doc_id, fields, sid="", session=None):
    """Update a data doc's fields and broadcast the new doc to its room."""
    # Check to see if author has permission
    assert_can_write(doc_id, session=session)
    verify_data_doc_permission(doc_id, session=session)

    updated_doc = logic.update_data_doc(id=doc_id, session=session, **fields)
    updated_dict = updated_doc.to_dict()
    socketio.emit(
        "data_doc_updated",
        (sid, updated_dict),
        namespace=DATA_DOC_NAMESPACE,
        room=doc_id,
        broadcast=True,
    )
    return updated_dict
def update_datadoc_owner(doc_id, next_owner_id, originator=None):
    """Transfer ownership of a data doc to one of its current editors.

    The previous owner becomes a read/write editor and the new owner's
    editor row is removed. All three changes commit atomically, then three
    websocket events update clients. Returns the previous owner's new
    editor as a dict.

    NOTE(review): next_owner_id appears to be the editor-row id (it is
    passed to get_data_doc_editor_by_id / delete_data_doc_editor), not a
    uid — confirm with callers.
    """
    with DBSession() as session:
        # Add previous owner as an editor to the doc
        assert_is_owner(doc_id, session=session)
        current_owner_editor = logic.create_data_doc_editor(
            data_doc_id=doc_id,
            uid=current_user.id,
            read=True,
            write=True,
            commit=False,
            session=session,
        )
        current_owner_editor_dict = current_owner_editor.to_dict()
        # Remove next owner as a doc editor
        next_owner_editor = logic.get_data_doc_editor_by_id(
            next_owner_id, session=session
        )
        next_owner_editor_dict = next_owner_editor.to_dict()
        logic.delete_data_doc_editor(
            id=next_owner_id, doc_id=doc_id, session=session, commit=False
        )
        next_owner_uid = next_owner_editor_dict["uid"]
        # Update doc owner to next owner
        doc = logic.update_data_doc(
            id=doc_id, commit=False, session=session, owner_uid=next_owner_uid
        )
        doc_dict = doc.to_dict()
        # All three mutations above used commit=False; persist them together
        session.commit()
        socketio.emit(
            "data_doc_editor",
            (originator, doc_id, current_user.id, current_owner_editor_dict),
            namespace="/datadoc",
            room=doc_id,
            broadcast=True,
        )
        # None payload tells clients the new owner's editor row is gone
        socketio.emit(
            "data_doc_editor",
            (
                originator,
                next_owner_editor_dict["data_doc_id"],
                next_owner_editor_dict["uid"],
                None,
            ),
            namespace="/datadoc",
            room=next_owner_editor_dict["data_doc_id"],
            broadcast=True,
        )
        socketio.emit(
            "data_doc_updated",
            (originator, doc_dict,),
            namespace="/datadoc",
            room=next_owner_editor_dict["data_doc_id"],
            broadcast=True,
        )
        logic.update_es_data_doc_by_id(doc_id)
        send_datadoc_transfer_notification(doc_id, next_owner_uid, session)
        return current_owner_editor_dict
def move_data_cell(doc_id, from_index, to_index, sid="", session=None):
    """Reorder a cell within a doc and broadcast the move. Returns True."""
    assert_can_write(doc_id, session=session)
    verify_data_doc_permission(doc_id, session=session)
    logic.move_data_doc_cell(
        data_doc_id=doc_id,
        from_index=int(from_index),
        to_index=int(to_index),
        session=session,
    )
    socketio.emit(
        "data_cell_moved",
        (sid, from_index, to_index),
        namespace=DATA_DOC_NAMESPACE,
        room=doc_id,
        broadcast=True,
    )
    # Should we return data instead?
    return True
def paste_data_cell(cell_id: int, cut: bool, doc_id: int, index: int, sid="", session=None):
    """Paste a cell into ``doc_id`` at ``index``, either moving it (cut) or
    copying it (including its query history).

    Both docs must be in the same environment; the caller needs write access
    to the destination and, when crossing docs, cut requires write (copy
    requires read) on the source doc. Broadcasts the matching cell events to
    both rooms and finally a ``data_cell_pasted`` ack to the sender's room.
    """
    data_cell = logic.get_data_cell_by_id(cell_id, session=session)
    assert data_cell is not None, "Data cell does not exist"
    data_doc = logic.get_data_doc_by_id(doc_id, session=session)
    old_data_doc = data_cell.doc
    same_doc = old_data_doc.id == doc_id
    # Make sure they are in the same environment and have access
    assert (
        old_data_doc.environment_id == data_doc.environment_id
    ), "Must be in the same environment"
    verify_environment_permission([data_doc.environment_id])
    # Users need to be able to write in the doc copied to
    assert_can_write(doc_id, session=session)
    if not same_doc:
        if cut:
            # To cut the user need to be able to write the original doc
            assert_can_write(old_data_doc.id, session=session)
        else:
            # To copy the user need to be able to read the original doc
            assert_can_read(old_data_doc.id, session=session)

    if cut:
        # Remember where the cell was before the move for the client event
        old_cell_index = logic.get_data_doc_data_cell(
            cell_id, session=session
        ).cell_order
        logic.move_data_doc_cell_to_doc(cell_id, doc_id, index, session=session)
        if same_doc:
            # Account for shift in original index
            # See more details in move_data_doc_cell_to_doc
            if old_cell_index < index:
                index -= 1
            socketio.emit(
                "data_cell_moved",
                # sid, from_index, to_index
                (
                    sid,
                    old_cell_index,
                    index,
                ),
                namespace=DATA_DOC_NAMESPACE,
                room=doc_id,
                broadcast=True,
            )
        else:
            # Cross-doc cut: insert into the destination room, delete from the
            # source room
            socketio.emit(
                "data_cell_inserted",
                (
                    sid,
                    index,
                    data_cell.to_dict(),
                ),
                namespace=DATA_DOC_NAMESPACE,
                room=doc_id,
                broadcast=True,
            )
            socketio.emit(
                "data_cell_deleted",
                (
                    sid,
                    cell_id,
                ),
                namespace=DATA_DOC_NAMESPACE,
                room=old_data_doc.id,
                broadcast=True,
            )
    else:
        # Copy — insert_data_cell also broadcasts data_cell_inserted
        new_cell_dict = insert_data_cell(
            doc_id,
            index,
            data_cell.cell_type.name,
            data_cell.context,
            data_cell.meta,
            sid,
            session=session,
        )
        # Copy all query history over
        logic.copy_cell_history(cell_id, new_cell_dict["id"], session=session)

    # To resolve the sender's promise
    # NOTE(review): (sid) is not a tuple — the payload is the bare sid string,
    # unlike the tuple payloads above; confirm the client expects this shape
    socketio.emit(
        "data_cell_pasted",
        (sid),
        namespace=DATA_DOC_NAMESPACE,
        room=doc_id,
        broadcast=False,
    )