def detect_expired_session(cls):
    """Sweep and destroy sessions older than ``SESSION_VALID_PERIOD``.

    Queries session records whose ``create_time`` is older than the TTL,
    deduplicates them by manager session id, and destroys each manager
    session. A failure while destroying one session is logged and does
    not abort the sweep of the remaining sessions.
    """
    ttl = SESSION_VALID_PERIOD
    detect_logger().info(
        f'start detect expired session by ttl {ttl/1000} s')
    try:
        # Records created before (now - ttl) are considered expired.
        session_records = Session.query_sessions(
            create_time=[None, current_timestamp() - ttl])
        manager_session_id_list = []
        for session_record in session_records:
            manager_session_id = session_record.f_manager_session_id
            # BUG FIX: the original tested
            # `manager_session_id not in manager_session_id` — a string
            # always contains itself, so the dedup guard never fired and
            # the same manager session could be destroyed repeatedly.
            if manager_session_id in manager_session_id_list:
                continue
            manager_session_id_list.append(manager_session_id)
            detect_logger().info(
                f'start destroy session {manager_session_id}')
            try:
                sess = Session(session_id=manager_session_id,
                               options={"logger": detect_logger()})
                sess.destroy_all_sessions()
            except Exception as e:
                detect_logger().error(
                    f'stop session {manager_session_id} error', e)
            else:
                # Only report success when destruction did not raise
                # (the original used `finally`, logging "successfully"
                # even right after the error branch).
                detect_logger().info(
                    f'stop session {manager_session_id} successfully')
    except Exception as e:
        detect_logger().error('detect expired session error', e)
    finally:
        detect_logger().info('finish detect expired session')
def table_delete():
    """Delete one storage table named by the JSON request body.

    Expects JSON fields ``table_name`` and ``namespace``. Destroys the
    table if it exists (including disabled tables) and returns its
    identifiers; otherwise returns retcode 101. The session is always
    torn down, even if lookup or destruction raises.
    """
    request_data = request.json
    table_name = request_data.get('table_name')
    namespace = request_data.get('namespace')
    data = None
    sess = Session()
    try:
        table = sess.get_table(name=table_name, namespace=namespace,
                               ignore_disable=True)
        if table:
            table.destroy()
            data = {'table_name': table_name, 'namespace': namespace}
    finally:
        # BUG FIX: the original called destroy_all_sessions() outside any
        # try/finally, so an exception in get_table()/destroy() leaked the
        # session.
        sess.destroy_all_sessions()
    if data:
        return get_json_result(data=data)
    return get_json_result(retcode=101, retmsg='no find table')
def table_delete_disable():
    """Delete every storage table whose metadata is marked disabled.

    Queries table metadata with ``disable=True``, destroys each matching
    table, and returns the list of deleted (name, namespace) pairs;
    returns retcode 101 when nothing was deleted. The session is always
    torn down, even if a destroy raises mid-loop.
    """
    request_data = request.json
    adapter_request_data(request_data)
    tables_meta = storage.StorageTableMeta.query_table_meta(
        filter_fields={"disable": True})
    data = []
    sess = Session()
    try:
        for table_meta in tables_meta:
            table = sess.get_table(name=table_meta.f_name,
                                   namespace=table_meta.f_namespace,
                                   ignore_disable=True)
            if table:
                table.destroy()
                data.append({
                    'table_name': table_meta.f_name,
                    'namespace': table_meta.f_namespace
                })
    finally:
        # BUG FIX: the original called destroy_all_sessions() outside any
        # try/finally, so an exception while destroying one table leaked
        # the session.
        sess.destroy_all_sessions()
    if data:
        return get_json_result(data=data)
    return get_json_result(retcode=101, retmsg='no find table')
def table_bind():
    """Bind an existing storage address to a named table.

    Reads engine/address/name/namespace from the JSON request body,
    creates the table in a storage session, verifies the address, and
    registers a tracker entry on success. If a table with the same
    name/namespace already exists, it is either destroyed (``drop=1``)
    or the request is rejected with retcode 100.
    """
    request_data = request.json
    address_dict = request_data.get('address')
    engine = request_data.get('engine')
    name = request_data.get('name')
    namespace = request_data.get('namespace')
    address = storage.StorageTableMeta.create_address(
        storage_engine=engine, address_dict=address_dict)
    # These engines store serialized values by default; others store raw.
    in_serialized = request_data.get(
        "in_serialized",
        1 if engine in {
            storage.StorageEngine.STANDALONE, storage.StorageEngine.EGGROLL,
            storage.StorageEngine.MYSQL, storage.StorageEngine.PATH
        } else 0)
    destroy = (int(request_data.get("drop", 0)) == 1)
    data_table_meta = storage.StorageTableMeta(name=name, namespace=namespace)
    if data_table_meta:
        if destroy:
            data_table_meta.destroy_metas()
        else:
            return get_json_result(
                retcode=100,
                retmsg='The data table already exists.'
                       'If you still want to continue uploading, please add the parameter --drop'
            )
    id_column = request_data.get("id_column") or request_data.get("id_name")
    feature_column = request_data.get("feature_column") or request_data.get(
        "feature_name")
    schema = None
    if id_column and feature_column:
        schema = {'header': feature_column, 'sid': id_column}
    elif id_column:
        schema = {'sid': id_column, 'header': ''}
    sess = Session()
    try:
        storage_session = sess.storage(storage_engine=engine,
                                       options=request_data.get("options"))
        # NOTE(review): `hava_head` matches the storage API's keyword as
        # used here — do not "fix" the spelling without changing the API.
        table = storage_session.create_table(
            address=address,
            name=name,
            namespace=namespace,
            partitions=request_data.get('partitions', None),
            hava_head=request_data.get("head"),
            schema=schema,
            id_delimiter=request_data.get("id_delimiter"),
            in_serialized=in_serialized,
            origin=request_data.get("origin", StorageTableOrigin.TABLE_BIND))
        response = get_json_result(data={
            "table_name": name,
            "namespace": namespace
        })
        if not table.check_address():
            response = get_json_result(
                retcode=100,
                retmsg=f'engine {engine} address {address_dict} check failed')
        else:
            DataTableTracker.create_table_tracker(
                table_name=name,
                table_namespace=namespace,
                entity_info={"have_parent": False},
            )
    finally:
        # BUG FIX: the original only destroyed the session on the normal
        # path; any exception between Session() and here leaked it.
        sess.destroy_all_sessions()
    return response