def get_current_version(migration_type):
    """Return the stored data version as a (major, minor, patch, migration) tuple.

    Returns (-1, -1, -1, -1) when the data_version table does not exist yet or
    holds no row for *migration_type*. For the legacy schema without a
    migration_type column, only MIGRATION_TYPE_SCHEMA is expected and the
    single existing row is returned.
    """
    current_version = None
    try:
        # migration_type is an internal constant (see the assert below), not
        # user input, so direct interpolation into the SQL string is accepted.
        sql_select = f'''select major_version, minor_version, patch_version, migration from {DB_SCHEMA}.data_version where migration_type = '{migration_type}';'''
        sql_result = db_util.run_query(sql_select, encapsulate_exception=False)
        row_count = len(sql_result)
        if row_count == 1:
            current_version = sql_result[0]
        elif row_count == 0:
            current_version = (-1, -1, -1, -1)
        else:
            # More than one row per migration_type means corrupted data.
            assert row_count == 1
    except psycopg2.errors.UndefinedTable:  # pylint: disable=no-member
        # Table not created yet — treat as "no version stored".
        current_version = (-1, -1, -1, -1)
    except psycopg2.errors.UndefinedColumn:  # pylint: disable=no-member
        # Legacy schema without the migration_type column: exactly one row.
        assert migration_type == consts.MIGRATION_TYPE_SCHEMA
        sql_select = f'''select major_version, minor_version, patch_version, migration from {DB_SCHEMA}.data_version;'''
        sql_result = db_util.run_query(sql_select, encapsulate_exception=False)
        row_count = len(sql_result)
        assert row_count == 1
        current_version = sql_result[0]
    return current_version
def test_updated_at(publication_type):
    """Check that updated_at (DB column and REST field) is set on publish and refreshed on patch."""
    workspace = 'test_update_at_workspace'
    publication = 'test_update_at_publication'
    query = f''' select p.updated_at from {db_schema}.publications p inner join {db_schema}.workspaces w on p.id_workspace = w.id where w.name = %s and p.type = %s and p.name = %s ;'''

    # Publish and capture surrounding UTC timestamps.
    timestamp1 = datetime.datetime.now(datetime.timezone.utc)
    process_client.publish_workspace_publication(publication_type, workspace, publication)
    timestamp2 = datetime.datetime.now(datetime.timezone.utc)

    # DB value must fall between the publish timestamps.
    with app.app_context():
        results = db_util.run_query(query, (workspace, publication_type, publication))
    assert len(results) == 1 and len(results[0]) == 1, results
    updated_at_db = results[0][0]
    assert timestamp1 < updated_at_db < timestamp2

    # REST API must report the same window.
    info = process_client.get_workspace_publication(publication_type, workspace, publication)
    updated_at_rest_str = info['updated_at']
    updated_at_rest = parse(updated_at_rest_str)
    assert timestamp1 < updated_at_rest < timestamp2

    # Patch must refresh updated_at into the new time window.
    timestamp3 = datetime.datetime.now(datetime.timezone.utc)
    process_client.patch_workspace_publication(publication_type, workspace, publication, title='Title')
    timestamp4 = datetime.datetime.now(datetime.timezone.utc)

    with app.app_context():
        results = db_util.run_query(query, (workspace, publication_type, publication))
    assert len(results) == 1 and len(results[0]) == 1, results
    updated_at_db = results[0][0]
    assert timestamp3 < updated_at_db < timestamp4

    info = process_client.get_workspace_publication(publication_type, workspace, publication)
    updated_at_rest_str = info['updated_at']
    updated_at_rest = parse(updated_at_rest_str)
    assert timestamp3 < updated_at_rest < timestamp4

    process_client.delete_workspace_publication(publication_type, workspace, publication)
def get_bbox_sphere_size(workspace, publication_type, publication):
    """Return [x_size, y_size]: spherical distances (via ST_DistanceSphere, in meters)
    across the publication's bbox, measured through WGS84 (EPSG:4326).

    The bbox and its srid come from the publications table row identified by
    (publication_type, publication, workspace).
    """
    # NOTE(review): the y_size branch passes ST_YMin(bbox)/ST_YMax(bbox) as the
    # *first* (x) argument of ST_MakePoint, unlike the x_size branch — looks
    # like swapped coordinates; confirm intended behavior before reuse.
    query = f""" select ST_DistanceSphere( st_transform(st_setsrid(ST_MakePoint( ST_XMin(bbox), (ST_YMax(bbox) + ST_YMin(bbox)) / 2 ), srid), 4326), st_transform(st_setsrid(ST_MakePoint( ST_XMax(bbox), (ST_YMax(bbox) + ST_YMin(bbox)) / 2 ), srid), 4326) ) as x_size, ST_DistanceSphere( st_transform(st_setsrid(ST_MakePoint( ST_YMin(bbox), (ST_XMax(bbox) + ST_XMin(bbox)) / 2 ), srid), 4326), st_transform(st_setsrid(ST_MakePoint( ST_YMax(bbox), (ST_XMax(bbox) + ST_XMin(bbox)) / 2 ), srid), 4326) ) as y_size from {DB_SCHEMA}.publications where type = %s and name = %s and id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s) """
    [x_size, y_size] = db_util.run_query(query, (publication_type, publication, workspace))[0]
    return [x_size, y_size]
def get_crs(workspace, layername, conn_cur=None):
    """Look up the CRS of the layer table's wkb_geometry column via PostGIS Find_SRID."""
    query = f''' select Find_SRID('{workspace}', '{layername}', 'wkb_geometry'); '''
    rows = db_util.run_query(query, conn_cur=conn_cur)
    return db_util.get_crs(rows[0][0])
def point_coordinates(workspace, publ_type, name, *, point_id, crs, exp_coordinates, precision, ):
    """Assert that the stored point's coordinates, transformed to *crs*, match *exp_coordinates*.

    The point is read from table {workspace}.{name} by its point_id, transformed
    with ST_Transform, and each coordinate must match within *precision*.
    """
    assert publ_type == LAYER_TYPE
    query = f'''with transformed as (select st_transform(wkb_geometry, %s) point from {workspace}.{name} where point_id = %s) select st_x(point), st_y(point) from transformed ;'''
    with app.app_context():
        to_srid = db_util.get_srid(crs)
        coordinates = db_util.run_query(query, (to_srid, point_id))
    assert len(coordinates) == 1, coordinates
    coordinates = coordinates[0]
    # Bug fix: range(0, 1) yielded only i=0, so only the X coordinate was
    # ever checked — compare both X and Y.
    for i in range(2):
        assert abs(
            exp_coordinates[i] - coordinates[i]
        ) <= precision, f'exp_coordinates={exp_coordinates}, coordinates={coordinates}'
def insert_publication(workspace_name, info):
    """Insert a new publication row for *workspace_name* and set its access rights.

    Returns the id of the inserted publication.
    """
    id_workspace = workspaces.ensure_workspace(workspace_name)
    check_publication_info(workspace_name, info)
    insert_publications_sql = f'''insert into {DB_SCHEMA}.publications as p (id_workspace, name, title, type, uuid, style_type, everyone_can_read, everyone_can_write, updated_at) values (%s, %s, %s, %s, %s, %s, %s, %s, current_timestamp) returning id ;'''
    access_rights = info['access_rights']
    row_values = (
        id_workspace,
        info.get("name"),
        info.get("title"),
        info.get("publ_type_name"),
        info.get("uuid"),
        info.get('style_type'),
        ROLE_EVERYONE in access_rights['read'],
        ROLE_EVERYONE in access_rights['write'],
    )
    pub_id = db_util.run_query(insert_publications_sql, row_values)[0][0]
    # Per-user rights exclude role names; read first, then write.
    for right_type in ('read', 'write'):
        usernames = clear_roles(access_rights[right_type], workspace_name)
        rights.insert_rights(pub_id, usernames, right_type)
    return pub_id
def update_publication(workspace_name, info):
    """Update title/style/access rights of an existing publication; return its id.

    When access rights are passed, the difference against the current rights is
    computed and the per-user rights rows are adjusted accordingly.
    """
    id_workspace = workspaces.get_workspace_infos(workspace_name)[workspace_name]["id"]
    right_type_list = ['read', 'write']
    # Per right type: new EVERYONE flag (None = keep), users to add/remove.
    access_rights_changes = dict()
    for right_type in right_type_list:
        access_rights_changes[right_type] = {
            'EVERYONE': None,
            'add': set(),
            'remove': set(),
        }
    if info.get("access_rights") and (info["access_rights"].get("read") or info["access_rights"].get("write")):
        # Load current rights to diff against the requested ones.
        info_old = get_publication_infos(workspace_name, info["publ_type_name"])[(workspace_name, info["publ_type_name"], info["name"],)]
        for right_type in right_type_list:
            access_rights_changes[right_type]['username_list_old'] = info_old["access_rights"][right_type]
            info["access_rights"][right_type + "_old"] = access_rights_changes[right_type]['username_list_old']
        check_publication_info(workspace_name, info)
        for right_type in right_type_list:
            if info['access_rights'].get(right_type):
                usernames_list = info["access_rights"].get(right_type)
                access_rights_changes[right_type]['EVERYONE'] = ROLE_EVERYONE in usernames_list
                # Role names are stripped; only real user names get rights rows.
                usernames_list_clear = clear_roles(usernames_list, workspace_name)
                usernames_old_list_clear = clear_roles(access_rights_changes[right_type]['username_list_old'], workspace_name)
                access_rights_changes[right_type]['add'] = usernames_list_clear.difference(usernames_old_list_clear)
                access_rights_changes[right_type]['remove'] = usernames_old_list_clear.difference(usernames_list_clear)
    # coalesce() keeps the stored value wherever the new value is NULL/None.
    update_publications_sql = f'''update {DB_SCHEMA}.publications set title = coalesce(%s, title), style_type = coalesce(%s, style_type), everyone_can_read = coalesce(%s, everyone_can_read), everyone_can_write = coalesce(%s, everyone_can_write), updated_at = current_timestamp where id_workspace = %s and name = %s and type = %s returning id ;'''
    data = (info.get("title"),
            info.get('style_type'),
            access_rights_changes['read']['EVERYONE'],
            access_rights_changes['write']['EVERYONE'],
            id_workspace,
            info.get("name"),
            info.get("publ_type_name"),
            )
    pub_id = db_util.run_query(update_publications_sql, data)[0][0]
    for right_type in right_type_list:
        rights.insert_rights(pub_id, access_rights_changes[right_type]['add'], right_type)
        rights.remove_rights(pub_id, access_rights_changes[right_type]['remove'], right_type)
    return pub_id
def test_adjust_prime_db_schema_for_bbox_search():
    """After nulling all bboxes, adjust_data_for_bbox_search must restore them from publication data."""
    workspace = 'test_adjust_prime_db_schema_for_bbox_search_workspace'
    layer = 'test_adjust_prime_db_schema_for_bbox_search_layer'
    # Renamed from `map` to avoid shadowing the builtin.
    map_name = 'test_adjust_prime_db_schema_for_bbox_search_map'
    expected_bbox_layer = test_data.SMALL_LAYER_BBOX
    expected_bbox_map = test_data.SMALL_MAP_BBOX
    process_client.publish_workspace_layer(workspace, layer)
    process_client.publish_workspace_map(workspace, map_name)
    with app.app_context():
        # Wipe all bboxes so the migration has something to repair.
        statement = f'ALTER TABLE {db_schema}.publications ALTER COLUMN bbox DROP NOT NULL;'
        db_util.run_statement(statement)
        statement = f'update {db_schema}.publications set bbox = null;'
        db_util.run_statement(statement)
        query = f'select p.id from {db_schema}.publications p where p.bbox is not null;'
        results = db_util.run_query(query)
        assert not results, results

        upgrade_v1_12.adjust_data_for_bbox_search()

        for publication_type, publication, expected_bbox in [
            ('layman.layer', layer, expected_bbox_layer),
            ('layman.map', map_name, expected_bbox_map)
        ]:
            query = f''' select ST_XMIN(p.bbox), ST_YMIN(p.bbox), ST_XMAX(p.bbox), ST_YMAX(p.bbox) from {db_schema}.publications p inner join {db_schema}.workspaces w on p.id_workspace = w.id where w.name = %s and p.type = %s and p.name = %s ;'''
            results = db_util.run_query(query, (workspace, publication_type, publication))
            assert len(results) == 1 and len(results[0]) == 4, results
            bbox = results[0]
            assert_util.assert_same_bboxes(bbox, expected_bbox, 0.000001)
    # Bug fix: the published map was never deleted, leaking test data
    # (sibling last-change test cleans up both publications).
    process_client.delete_workspace_layer(workspace, layer)
    process_client.delete_workspace_map(workspace, map_name)
def test_adjust_prime_db_schema_for_last_change_search():
    """After nulling updated_at, adjust_data_for_last_change_search must restore plausible timestamps."""
    workspace = 'test_adjust_prime_db_schema_for_last_change_search_workspace'
    layer = 'test_adjust_prime_db_schema_for_last_change_search_layer'
    map = 'test_adjust_prime_db_schema_for_last_change_search_map'
    # Publish a layer then a map inside a UTC timestamp window.
    timestamp1 = datetime.datetime.now(datetime.timezone.utc)
    process_client.publish_workspace_layer(workspace, layer)
    process_client.publish_workspace_map(workspace, map)
    timestamp2 = datetime.datetime.now(datetime.timezone.utc)
    with app.app_context():
        # Wipe updated_at so the migration has something to repair.
        statement = f'ALTER TABLE {db_schema}.publications ALTER COLUMN updated_at DROP NOT NULL;'
        db_util.run_statement(statement)
        statement = f'update {db_schema}.publications set updated_at = null;'
        db_util.run_statement(statement)
        query = f'select p.id from {db_schema}.publications p where p.updated_at is not null;'
        results = db_util.run_query(query)
        assert not results, results

        upgrade_v1_12.adjust_data_for_last_change_search()

        query = f''' select p.updated_at from {db_schema}.publications p inner join {db_schema}.workspaces w on p.id_workspace = w.id where w.name = %s and p.type = %s and p.name = %s ;'''
        # Restored timestamps must fall inside the publish window...
        results = db_util.run_query(query, (workspace, 'layman.layer', layer))
        assert len(results) == 1 and len(results[0]) == 1, results
        layer_updated_at = results[0][0]
        assert timestamp1 < layer_updated_at < timestamp2

        results = db_util.run_query(query, (workspace, 'layman.map', map))
        assert len(results) == 1 and len(results[0]) == 1, results
        map_updated_at = results[0][0]
        assert timestamp1 < map_updated_at < timestamp2

        # ...and preserve publish order (layer was published first).
        assert layer_updated_at < map_updated_at
    process_client.delete_workspace_layer(workspace, layer)
    process_client.delete_workspace_map(workspace, map)
def test_ensure_workspace():
    """Workspace create/reuse/delete must keep the workspaces id sequence consistent."""
    username = '******'
    sql = f'select ws.last_value from {DB_SCHEMA}.workspaces_id_seq ws;'
    with app.app_context():
        # First ensure creates the workspace; no user row exists for it.
        id_workspace = workspace_util.ensure_workspace(username)
        assert id_workspace
        id_user = user_util.get_user_infos(username)
        assert not id_user
        workspaces_seq_value_1 = db_util.run_query(sql)[0][0]
        assert workspaces_seq_value_1 == id_workspace,\
            f'workspaces_seq_value_1={workspaces_seq_value_1}, id_workspace={id_workspace}'

        # Second ensure is idempotent: same id, sequence untouched.
        id_workspace2 = workspace_util.ensure_workspace(username)
        assert id_workspace == id_workspace2
        workspaces_seq_value_2 = db_util.run_query(sql)[0][0]
        assert workspaces_seq_value_2 == workspaces_seq_value_1,\
            f'workspaces_seq_value_1={workspaces_seq_value_1}, id_workspace={id_workspace}, workspaces_seq_value_2={workspaces_seq_value_2}'

        # Workspace visible both in unfiltered and filtered listings.
        infos = workspace_util.get_workspace_infos()
        assert username in infos
        assert infos[username]['id'] == id_workspace
        infos = workspace_util.get_workspace_infos(username)
        assert username in infos
        assert infos[username]['id'] == id_workspace

        # Delete removes it from both listings.
        workspace_util.delete_workspace(username)
        infos = workspace_util.get_workspace_infos()
        assert username not in infos
        infos = workspace_util.get_workspace_infos(username)
        assert username not in infos

        # Re-creating via ensure_whole_user consumes exactly one new sequence value.
        ensure_whole_user(username)
        workspaces_seq_value_3 = db_util.run_query(sql)[0][0]
        assert workspaces_seq_value_3 == workspaces_seq_value_2 + 1,\
            f'workspaces_seq_value_1={workspaces_seq_value_1}, id_workspace={id_workspace},' \
            f'workspaces_seq_value_2={workspaces_seq_value_2}, workspaces_seq_value_3={workspaces_seq_value_3}'
        infos = workspace_util.get_workspace_infos()
        assert username in infos
        infos = workspace_util.get_workspace_infos(username)
        assert username in infos
        # Still no user row — ensure_whole_user created only the workspace here.
        id_user = user_util.get_user_infos(username)
        assert not id_user
def assert_out_of_the_box_publications(expected_count):
    """Assert how many publications have a bbox exceeding the WebMercator world bounds."""
    query = f'''select count(*) from {DB_SCHEMA}.publications p where st_xMin(p.bbox) < -20026376.39 or st_yMin(p.bbox) < -20048966.10 or st_xMax(p.bbox) > 20026376.39 or st_yMax(p.bbox) > 20048966.10 ;'''
    with app.app_context():
        rows = db_util.run_query(query)
        assert rows[0][0] == expected_count, rows
def insert_rights(id_publication,
                  users,
                  type,
                  ):
    """Grant right *type* on publication *id_publication* to each user in *users*."""
    # The select resolves each username to its user id via the workspace name.
    sql = f'''insert into {DB_SCHEMA}.rights (id_user, id_publication, type) select u.id, %s, %s from {DB_SCHEMA}.users u inner join {DB_SCHEMA}.workspaces w on w.id = u.id_workspace where w.name = %s returning id ;'''
    for username in users:
        params = (id_publication, type, username, )
        db_util.run_query(sql, params)
def ensure_workspace(name):
    """Return the id of workspace *name*, inserting the row if it does not exist yet."""
    existing = get_workspace_infos(name)
    if existing:
        return existing[name]["id"]
    # Upsert guards against a concurrent insert of the same name.
    sql = f"""insert into {DB_SCHEMA}.workspaces (name) values (%s) ON CONFLICT (name) DO update SET name = EXCLUDED.name returning id;"""
    rows = db_util.run_query(sql, (name, ))
    return rows[0][0]
def geoserver_everyone_rights_repair():
    """Re-propagate per-layer read/write access rights (incl. EVERYONE) to GeoServer security roles."""
    logger.info(
        f' Starting - access rights EVERYONE is not propagated to GeoServer for authenticated users'
    )
    select_layers = f"select w.name, p.name " \
                    f"from {DB_SCHEMA}.publications p inner join {DB_SCHEMA}.workspaces w on w.id = p.id_workspace " \
                    f"where p.type = 'layman.layer' "
    publication_infos = db_util.run_query(select_layers)
    # NOTE(review): this query hardcodes the '_prime_schema' schema name while
    # select_layers above uses {DB_SCHEMA} — presumably they are the same value;
    # verify before reusing with a different schema. The r.type parameter makes
    # the same query serve both read and write rights.
    select_rights = f"""select (select rtrim(concat(case when u.id is not null then w.name || ',' end, string_agg(w2.name, ',') || ',', case when p.everyone_can_read then '{settings.RIGHTS_EVERYONE_ROLE}' || ',' end ), ',') from _prime_schema.rights r inner join _prime_schema.users u2 on r.id_user = u2.id inner join _prime_schema.workspaces w2 on w2.id = u2.id_workspace where r.id_publication = p.id and r.type = %s) can_read_users from _prime_schema.workspaces w inner join _prime_schema.publications p on p.id_workspace = w.id and p.type = 'layman.layer' left join _prime_schema.users u on u.id_workspace = w.id where w.name = %s and p.name = %s"""
    for (workspace, publication_name) in publication_infos:
        for right_type in ['read', 'write']:
            # One-tuple row holding the comma-separated list of user/role names.
            users_roles = db_util.run_query(
                select_rights, (right_type, workspace, publication_name))[0]
            security_roles = gs_common.layman_users_to_geoserver_roles(
                users_roles)
            logger.info(
                f' Setting security roles for: ({workspace}/{publication_name}).{right_type} '
                f'to ({security_roles}) from layman roles ({users_roles})')
            # right_type[0] -> 'r' or 'w' as expected by the GeoServer util.
            gs_util.ensure_layer_security_roles(workspace, publication_name,
                                                security_roles, right_type[0],
                                                settings.LAYMAN_GS_AUTH)
    logger.info(
        f' DONE - access rights EVERYONE is not propagated to GeoServer for authenticated users'
    )
def get_bbox(username, layername, conn_cur=None):
    """Return (xmin, ymin, xmax, ymax) extent of the layer table's geometries."""
    query = f''' with tmp as (select ST_Extent(l.wkb_geometry) as bbox from {username}.{layername} l ) select st_xmin(bbox), st_ymin(bbox), st_xmax(bbox), st_ymax(bbox) from tmp '''
    rows = db_util.run_query(query, conn_cur=conn_cur)
    return rows[0]
def test_ensure_user():
    """User creation must be idempotent and keep the users id sequence consistent."""
    username = '******'
    userinfo = {
        "issuer_id": 'mock_test_users_test',
        "sub": '10',
        "claims": {
            "email": "*****@*****.**",
            "name": "test ensure user",
            "preferred_username": '******',
            "given_name": "test",
            "family_name": "user",
            "middle_name": "ensure",
        }
    }
    sql = f'select us.last_value from {DB_SCHEMA}.users_id_seq us;'
    with app.app_context():
        # ensure_user is idempotent for an existing workspace.
        id_workspace = workspace_util.ensure_workspace(username)
        user_id = user_util.ensure_user(id_workspace, userinfo)
        assert user_id
        user_id2 = user_util.ensure_user(id_workspace, userinfo)
        assert user_id2 == user_id
        prime_db_schema.delete_whole_user(username)
    with app.app_context():
        # Creating a whole user consumes exactly one users-sequence value...
        users_seq_value_1 = db_util.run_query(sql)[0][0]
        (id_workspace, id_user) = prime_db_schema.ensure_whole_user(username, userinfo)
        users_seq_value_2 = db_util.run_query(sql)[0][0]
        assert users_seq_value_2 == id_user,\
            f'users_seq_value_1={users_seq_value_1}, id_user={id_user}, users_seq_value_2={users_seq_value_2}'
        assert users_seq_value_2 == users_seq_value_1 + 1,\
            f'users_seq_value_1={users_seq_value_1}, id_user={id_user}, users_seq_value_2={users_seq_value_2}'
        # ...and repeating it reuses the row without touching the sequence.
        (_, id_user2) = prime_db_schema.ensure_whole_user(username, userinfo)
        users_seq_value_3 = db_util.run_query(sql)[0][0]
        assert id_user2 == id_user
        assert users_seq_value_3 == users_seq_value_2,\
            f'users_seq_value_1={users_seq_value_1}, id_user={id_user},' \
            f'users_seq_value_2={users_seq_value_2}, users_seq_value_3={users_seq_value_3}'
def test_schema():
    """Drop and recreate the prime DB schema; workspaces, users and publications must be repopulated."""
    with app.app_context():
        run_statement(model.DROP_SCHEMA_SQL)
        ensure_schema(settings.LAYMAN_PRIME_SCHEMA, )
        # Row counts must match the filesystem/source-of-truth listings.
        workspaces = run_query(f'select count(*) from {DB_SCHEMA}.workspaces;')
        assert workspaces[0][0] == len(util.get_workspaces())
        users = run_query(f'select count(*) from {DB_SCHEMA}.users;')
        assert users[0][0] == len(util.get_usernames(use_cache=False))
        user_infos = workspaces_util.get_workspace_infos(USERNAME)
        assert USERNAME in user_infos
        # The test user's layer and map must have been re-imported.
        select_publications = f"""with const as (select %s workspace_name) select w.name as workspace_name, p.type, p.name from const c inner join {DB_SCHEMA}.workspaces w on w.name = c.workspace_name inner join {DB_SCHEMA}.publications p on p.id_workspace = w.id left join {DB_SCHEMA}.users u on u.id_workspace = w.id ;"""
        pub_infos = run_query(select_publications, (USERNAME, ))
        assert (USERNAME, LAYER_TYPE, LAYERNAME) in pub_infos
        assert (USERNAME, MAP_TYPE, MAPNAME) in pub_infos
def migrate_maps_on_wms_workspace():
    """Rewrite GeoServer ows/wms/wfs layer URLs inside stored map JSON files
    so they point at the layer's dedicated WMS workspace."""
    logger.info(f' Starting - migrate maps json urls')
    query = f''' select w.name, p.name from {db_schema}.publications p inner join {db_schema}.workspaces w on w.id = p.id_workspace where p.type = %s '''
    params = (MAP_TYPE, )
    publications = db_util.run_query(query, params)
    gs_url = layer_gs_util.get_gs_proxy_base_url()
    gs_url = gs_url if gs_url.endswith('/') else f"{gs_url}/"
    # group(1) = workspace name, group(2) = /ows|/wms|/wfs + rest of the URL.
    gs_wms_url_pattern = r'^' + re.escape(
        gs_url
    ) + r'(' + util.USERNAME_ONLY_PATTERN + r')' + r'(/(?:ows|wms|wfs).*)$'
    all_workspaces = workspaces.get_workspace_names()
    for (workspace, map) in publications:
        file_path = input_file.get_map_file(workspace, map)
        is_changed = False
        with open(file_path, 'r') as map_file:
            map_json_raw = json.load(map_file)
            map_json = input_file.unquote_urls(map_json_raw)
            for map_layer in map_json['layers']:
                layer_url = map_layer.get('url', None)
                if not layer_url:
                    continue
                match = re.match(gs_wms_url_pattern, layer_url)
                if not match:
                    continue
                layer_workspace = match.group(1)
                if not layer_workspace:
                    continue
                # Unknown workspaces are left untouched, only logged.
                if layer_workspace not in all_workspaces:
                    logger.warning(
                        f' Do not know workspace {layer_workspace} in map {workspace}.{map}. Not migrating this url.'
                    )
                    continue
                layer_wms_workspace = wms.get_geoserver_workspace(
                    layer_workspace)
                map_layer[
                    'url'] = f'{gs_url}{layer_wms_workspace}{match.group(2)}'
                is_changed = True
        # Only rewrite the file when at least one URL changed.
        if is_changed:
            logger.info(f' Store new json for {workspace}.{map}')
            with open(file_path, 'w') as map_file:
                json.dump(map_json, map_file, indent=4)
    logger.info(f' DONE - migrate maps json urls')
def transform(bbox, epsg_from=4326, epsg_to=3857):
    """Transform *bbox* (xmin, ymin, xmax, ymax) between EPSG codes via PostGIS."""
    query = f''' with tmp as (select ST_Transform(ST_SetSRID(ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s, %s)), %s), %s) bbox) select st_xmin(bbox), st_ymin(bbox), st_xmax(bbox), st_ymax(bbox) from tmp ;'''
    params = tuple(bbox) + (epsg_from, epsg_to)
    return db_util.run_query(query, params)[0]
def get_user_infos(username=None, iss_sub=None, id_workspace=None):
    """Return {username: user-info dict}, filtered by at most one selector.

    Selectors: *username*, *iss_sub* (dict with 'issuer_id' and 'sub'), or
    *id_workspace*. With none given, all users are returned.
    """
    assert not (username and iss_sub)
    iss_sub = iss_sub or dict()
    # Pick the join condition matching the selector; '1 = 1' selects everyone.
    join_clause = '1 = 1'
    if username:
        join_clause = 'c.username = w.name'
    elif iss_sub:
        join_clause = 'c.issuer_id = u.issuer_id and c.sub = u.sub'
    elif id_workspace:
        join_clause = 'c.id_workspace = w.id'
    # All selector values are always bound; the join clause decides which apply.
    sql = f"""with const as (select %s username, %s issuer_id, %s sub, %s id_workspace) select u.id, w.name username, u.preferred_username, u.given_name, u.family_name, u.middle_name, u.name, u.email, u.issuer_id, u.sub from {DB_SCHEMA}.workspaces w inner join {DB_SCHEMA}.users u on w.id = u.id_workspace inner join const c on (""" + join_clause + """) order by w.name asc ;"""
    params = (username, iss_sub.get('issuer_id'), iss_sub.get('sub'), id_workspace)
    values = db_util.run_query(sql, params)
    result = {
        username: {
            "id": user_id,
            "username": username,
            "preferred_username": preferred_username,
            "given_name": given_name,
            "family_name": family_name,
            "middle_name": middle_name,
            "name": name,
            "email": email,
            "issuer_id": issuer_id,
            "sub": sub,
        }
        for user_id, username, preferred_username, given_name, family_name, middle_name, name, email, issuer_id, sub in values
    }
    return result
def get_workspace_infos(name=None):
    """Return {workspace_name: {'id', 'name'}} for one workspace or, with name=None, all of them."""
    # NULL name in const matches every workspace.
    sql = f"""with const as (select %s as name) select w.id, w.name from {DB_SCHEMA}.workspaces w inner join const c on ( c.name = w.name or c.name is null) order by w.name asc ;"""
    rows = db_util.run_query(sql, (name, ))
    infos = dict()
    for workspace_id, workspace_name in rows:
        infos[workspace_name] = {
            "id": workspace_id,
            "name": workspace_name,
        }
    return infos
def geoserver_remove_users_for_public_workspaces():
    """Delete the GeoServer user and roles created for every public workspace.

    A workspace is public when no users row points at it.
    """
    logger.info(
        f' Starting - delete unnecessary users and roles created for public workspaces'
    )
    sql_select_public_workspaces = f''' select w.name from {DB_SCHEMA}.workspaces w where NOT EXISTS(select 0 FROM {DB_SCHEMA}.USERS u where u.id_workspace = w.id)'''
    public_workspaces = db_util.run_query(sql_select_public_workspaces)
    auth = settings.LAYMAN_GS_AUTH
    for row in public_workspaces:
        workspace = row[0]
        logger.info(f' Delete user and role for workspace {workspace}')
        role = gs_util.username_to_rolename(workspace)
        # Detach roles from the user before removing the role and user themselves.
        gs_util.delete_user_role(workspace, role, auth)
        gs_util.delete_user_role(workspace, settings.LAYMAN_GS_ROLE, auth)
        gs_util.delete_role(role, auth)
        gs_util.delete_user(workspace, auth)
    logger.info(
        f' DONE - delete unnecessary users and roles created for public workspaces'
    )
def adjust_data_for_last_change_search():
    """Backfill publications.updated_at from the newest file mtime, then make the column NOT NULL."""
    logger.info(f' Starting - Set updated_at for all publications')
    query = f'''select p.id, w.name, p.type, p.name from {DB_SCHEMA}.publications p inner join {DB_SCHEMA}.workspaces w on w.id = p.id_workspace ;'''
    publications = db_util.run_query(query)
    for (
            row_id,
            workspace,
            type,
            name,
    ) in publications:
        # Publication directory, e.g. <workspaces_dir>/<ws>/layers/<name>
        # ('layman.layer' -> 'layers', 'layman.map' -> 'maps').
        publ_dir = os.path.join(
            fs_util.get_workspaces_dir(),
            workspace,
            type.split('.')[1] + 's',
            name,
        )
        updated_at = None
        # Newest modification time of any file under the publication directory.
        for root, _, files in os.walk(publ_dir):
            for file in files:
                file_updated_at = os.stat(os.path.join(root, file)).st_mtime
                updated_at = max(
                    updated_at, file_updated_at) if updated_at else file_updated_at
        # Fall back to 'now' when no files exist.
        updated_at = datetime.datetime.fromtimestamp(updated_at, datetime.timezone.utc)\
            if updated_at else datetime.datetime.now(datetime.timezone.utc)
        update = f'update {DB_SCHEMA}.publications set updated_at = %s where id = %s;'
        db_util.run_statement(update, (
            updated_at,
            row_id,
        ))
    # Every row now has a value, so the constraint can be enforced.
    statement = f'ALTER TABLE {DB_SCHEMA}.publications ALTER COLUMN updated_at SET NOT NULL;'
    db_util.run_statement(statement)
    logger.info(f' DONE - Set updated_at for all publications')
def migrate_input_sld_directory_to_input_style():
    """Rename each layer's legacy 'input_sld' directory to the new input_style location."""
    logger.info(f' Starting - migrate input_sld directories to input_style')
    query = f''' select w.name, p.name from {db_schema}.publications p inner join {db_schema}.workspaces w on w.id = p.id_workspace where p.type = %s '''
    layer_rows = db_util.run_query(query, (LAYER_TYPE, ))
    for workspace, layer in layer_rows:
        old_dir = os.path.join(layer_fs_util.get_layer_dir(workspace, layer), 'input_sld')
        # Layers created after the rename have no input_sld directory.
        if not os.path.exists(old_dir):
            continue
        logger.info(f' Migrate layer {workspace}.{layer}')
        new_dir = input_style.get_layer_input_style_dir(workspace, layer)
        os.rename(old_dir, new_dir)
    logger.info(f' DONE - migrate input_sld directories to input_style')
def set_current_migration_version(migration_type, version):
    """Store *version* (major, minor, patch, migration) for *migration_type*.

    Falls back to the legacy single-row data_version table (no migration_type
    column), where only MIGRATION_TYPE_SCHEMA is expected.
    """
    try:
        # migration_type is an internal constant, not user input.
        sql_insert = f'''update {DB_SCHEMA}.data_version set major_version = %s, minor_version = %s, patch_version = %s, migration = %s where migration_type = '{migration_type}';'''
        db_util.run_statement(sql_insert, version, encapsulate_exception=False)
    except psycopg2.errors.UndefinedColumn:  # pylint: disable=no-member
        # Legacy schema: the table must hold exactly one row.
        assert migration_type == consts.MIGRATION_TYPE_SCHEMA
        sql_select = f'''select count(*) from {DB_SCHEMA}.data_version;'''
        sql_result = db_util.run_query(sql_select)
        assert sql_result[0][0] == 1
        sql_insert = f'''update {DB_SCHEMA}.data_version set major_version = %s, minor_version = %s, patch_version = %s, migration = %s;'''
        db_util.run_statement(sql_insert, version)
def migrate_layer_metadata(workspace_filter=None):
    """Patch wms_url/wfs_url in each layer's CSW metadata record, best-effort.

    Layers without a metadata record are skipped with a warning. On CSW read
    timeout the record is inspected directly and a failure is only logged,
    never raised.
    """
    logger.info(f' Starting - migrate layer metadata records')
    query = f''' select w.name, p.name from {DB_SCHEMA}.publications p inner join {DB_SCHEMA}.workspaces w on w.id = p.id_workspace where p.type = %s '''
    params = (LAYER_TYPE, )
    if workspace_filter:
        query = query + ' AND w.name = %s'
        params = params + (workspace_filter, )
    publications = db_util.run_query(query, params)
    for (workspace, layer) in publications:
        logger.info(f' Migrate layer {workspace}.{layer}')
        try:
            muuid = layer_csw.patch_layer(workspace, layer, ['wms_url', 'wfs_url'], create_if_not_exists=False, timeout=2)
            if not muuid:
                logger.warning(
                    f' Metadata record of layer was not migrated, because the record does not exist.'
                )
        except requests.exceptions.ReadTimeout:
            # CSW did not answer in time; check whether the record already
            # carries the expected WMS URL before reporting a failure.
            md_props = list(
                layer_csw.get_metadata_comparison(workspace, layer).values())
            md_wms_url = md_props[0]['wms_url'] if md_props else None
            base_wms_url = wms.add_capabilities_params_to_url(
                wms.get_wms_url(workspace, external_url=True))
            exp_wms_url = f"{base_wms_url}&LAYERS={layer}"
            if md_wms_url != exp_wms_url:
                logger.exception(
                    f' WMS URL was not migrated (should be {exp_wms_url}, but is {md_wms_url})!'
                )
        # Throttle requests to the CSW service.
        time.sleep(0.5)
    logger.info(f' DONE - migrate layer metadata records')
def ensure_user(id_workspace, userinfo):
    """Return the id of the user bound to *id_workspace*, inserting the row if missing."""
    existing = get_user_infos(id_workspace=id_workspace)
    if existing:
        return next(iter(existing.values()))["id"]
    # Upsert guards against a concurrent insert for the same workspace.
    sql = f"""insert into {DB_SCHEMA}.users (id_workspace, preferred_username, given_name, family_name, middle_name, name, email, issuer_id, sub) values (%s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (id_workspace) DO update SET id_workspace = EXCLUDED.id_workspace returning id;"""
    claims = userinfo["claims"]
    data = (
        id_workspace,
        claims["preferred_username"],
        claims["given_name"],
        claims["family_name"],
        claims["middle_name"],
        claims["name"],
        claims["email"],
        userinfo["issuer_id"],
        userinfo["sub"],
    )
    return db_util.run_query(sql, data)[0][0]
def transform(bbox, crs_from, crs_to):
    """Transform *bbox* from *crs_from* to *crs_to*, clamping to known CRS bounds.

    Returns (None, None, None, None) for an empty bbox. The input is clamped to
    the source CRS's world bounds (when defined for the target CRS) before the
    transformation, and the result is clamped to the target CRS's max bbox.
    """
    if is_empty(bbox):
        return None, None, None, None

    def _clamp(box, bounds):
        # Clamp each corner into *bounds* = (xmin, ymin, xmax, ymax).
        return (
            min(max(box[0], bounds[0]), bounds[2]),
            min(max(box[1], bounds[1]), bounds[3]),
            max(min(box[2], bounds[2]), bounds[0]),
            max(min(box[3], bounds[3]), bounds[1]),
        )

    srid_from = db_util.get_srid(crs_from)
    srid_to = db_util.get_srid(crs_to)
    world_bounds = crs_def.CRSDefinitions[crs_to].world_bounds.get(crs_from)
    if world_bounds:
        bbox = _clamp(bbox, world_bounds)
    query = f''' with tmp as (select ST_Transform(ST_SetSRID(ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s, %s)), %s), %s) bbox) select st_xmin(bbox), st_ymin(bbox), st_xmax(bbox), st_ymax(bbox) from tmp ;'''
    params = tuple(bbox) + (srid_from, srid_to)
    result = db_util.run_query(query, params)[0]
    max_bbox = crs_def.CRSDefinitions[crs_to].max_bbox
    if max_bbox:
        result = _clamp(result, max_bbox)
    return result
def initialize_data_versioning():
    """Create the data_version table and seed it with the initial version row."""
    logger.info(f' Starting - data versioning initialization')
    create_table_sql = f'''CREATE TABLE IF NOT EXISTS {DB_SCHEMA}.data_version ( major_version integer not null, minor_version integer not null, patch_version integer not null, migration integer not null ) TABLESPACE pg_default;'''
    db_util.run_statement(create_table_sql)
    # The table is meant to hold a single row and must be empty at this point.
    count_sql = f'''select count(*) from {DB_SCHEMA}.data_version'''
    existing_rows = db_util.run_query(count_sql)[0][0]
    assert existing_rows == 0
    # Seed with the first versioned release: 1.9.0, migration 0.
    seed_sql = f'''insert into {DB_SCHEMA}.data_version (major_version, minor_version, patch_version, migration) values (1, 9, 0, 0);'''
    db_util.run_statement(seed_sql)
    logger.info(f' DONE - data versioning initialization')
def get_publication_infos_with_metainfo(
        workspace_name=None, pub_type=None, style_type=None,
        reader=None, writer=None,
        limit=None, offset=None,
        full_text_filter=None,
        bbox_filter=None,
        order_by_list=None,
        ordering_full_text=None,
        ordering_bbox=None,
):
    """Query publications with optional filters, ordering and pagination.

    Returns {'items': {(workspace, type, name): info-dict}, 'total_count': int,
    'content_range': (first, last)}. *reader*/*writer* restrict the result to
    publications the given user may read/write; *bbox_filter* and
    *ordering_bbox* are (xmin, ymin, xmax, ymax) tuples.
    """
    order_by_list = order_by_list or []
    # Pre-compute full-text variants used for filtering and for ranking.
    full_text_tsquery = db_util.to_tsquery_string(
        full_text_filter) if full_text_filter else None
    full_text_like = '%' + full_text_filter + '%' if full_text_filter else None
    ordering_full_text_tsquery = db_util.to_tsquery_string(
        ordering_full_text) if ordering_full_text else None

    # Triples of (activation value, SQL condition, bound params); a condition
    # joins the WHERE clause only when its activation value is truthy.
    where_params_def = [
        (workspace_name, 'w.name = %s', (workspace_name, )),
        (pub_type, 'p.type = %s', (pub_type, )),
        (style_type, 'p.style_type::text = %s', (style_type, )),
        (reader and not is_user_with_name(reader), 'p.everyone_can_read = TRUE', tuple()),
        (is_user_with_name(reader),
         f"""(p.everyone_can_read = TRUE or (u.id is not null and w.name = %s) or EXISTS(select 1 from {DB_SCHEMA}.rights r inner join {DB_SCHEMA}.users u2 on r.id_user = u2.id inner join {DB_SCHEMA}.workspaces w2 on w2.id = u2.id_workspace where r.id_publication = p.id and r.type = 'read' and w2.name = %s))""",
         (
             reader,
             reader,
         )),
        (writer and not is_user_with_name(writer), 'p.everyone_can_write = TRUE', tuple()),
        (is_user_with_name(writer),
         f"""(p.everyone_can_write = TRUE or (u.id is not null and w.name = %s) or EXISTS(select 1 from {DB_SCHEMA}.rights r inner join {DB_SCHEMA}.users u2 on r.id_user = u2.id inner join {DB_SCHEMA}.workspaces w2 on w2.id = u2.id_workspace where r.id_publication = p.id and r.type = 'write' and w2.name = %s))""",
         (
             writer,
             writer,
         )),
        (full_text_filter,
         '(_prime_schema.my_unaccent(p.title) @@ to_tsquery(unaccent(%s))'
         'or lower(unaccent(p.title)) like lower(unaccent(%s)))',
         (
             full_text_tsquery,
             full_text_like,
         )),
        (bbox_filter, 'p.bbox && ST_MakeBox2D(ST_MakePoint(%s, %s), ST_MakePoint(%s, %s))', bbox_filter),
    ]
    # Per ordering key: (SQL expression, bound params).
    order_by_definition = {
        consts.ORDER_BY_FULL_TEXT:
        ('ts_rank_cd(_prime_schema.my_unaccent(p.title), to_tsquery(unaccent(%s))) DESC', (ordering_full_text_tsquery, )),
        consts.ORDER_BY_TITLE: ('lower(unaccent(p.title)) ASC', tuple()),
        consts.ORDER_BY_LAST_CHANGE: ('updated_at DESC', tuple()),
        consts.ORDER_BY_BBOX: ("""
        -- A∩B / (A + B)
        CASE
            -- if there is any intersection
            WHEN p.bbox && ST_MakeBox2D(ST_MakePoint(%s, %s), ST_MakePoint(%s, %s)) THEN
                -- in cases, when area of intersection is 0, we want it rank higher than no intersection
                GREATEST(st_area(st_intersection(p.bbox, ST_MakeBox2D(ST_MakePoint(%s, %s), ST_MakePoint(%s, %s)))), 1)
                -- we have to solve division by 0
                / (GREATEST(st_area(p.bbox), 1) + GREATEST(st_area(ST_MakeBox2D(ST_MakePoint(%s, %s), ST_MakePoint(%s, %s))), 1) )
            -- if there is no intersection, result is 0 in all cases
            ELSE 0
        END DESC
        """, ordering_bbox + ordering_bbox + ordering_bbox if ordering_bbox else tuple()),
    }
    assert all(ordering_item in order_by_definition.keys() for ordering_item in order_by_list)

    #########################################################
    # SELECT clause
    # can_read_users / can_write_users are comma-separated name lists built by
    # the correlated subselects; full_count is a window over the filtered set.
    select_clause = f""" select p.id as id_publication, w.name as workspace_name, p.type, p.name, p.title, p.uuid::text, p.style_type, p.updated_at, ST_XMIN(p.bbox) as xmin, ST_YMIN(p.bbox) as ymin, ST_XMAX(p.bbox) as xmax, ST_YMAX(p.bbox) as ymax, (select rtrim(concat(case when u.id is not null then w.name || ',' end, string_agg(w2.name, ',') || ',', case when p.everyone_can_read then %s || ',' end ), ',') from {DB_SCHEMA}.rights r inner join {DB_SCHEMA}.users u2 on r.id_user = u2.id inner join {DB_SCHEMA}.workspaces w2 on w2.id = u2.id_workspace where r.id_publication = p.id and r.type = 'read') can_read_users, (select rtrim(concat(case when u.id is not null then w.name || ',' end, string_agg(w2.name, ',') || ',', case when p.everyone_can_write then %s || ',' end ), ',') from {DB_SCHEMA}.rights r inner join {DB_SCHEMA}.users u2 on r.id_user = u2.id inner join {DB_SCHEMA}.workspaces w2 on w2.id = u2.id_workspace where r.id_publication = p.id and r.type = 'write') can_write_users, count(*) OVER() AS full_count from {DB_SCHEMA}.workspaces w inner join {DB_SCHEMA}.publications p on p.id_workspace = w.id left join {DB_SCHEMA}.users u on u.id_workspace = w.id """
    select_params = (
        ROLE_EVERYONE,
        ROLE_EVERYONE,
    )

    #########################################################
    # WHERE clause
    where_params = tuple()
    where_parts = list()
    for (
            value,
            where_part,
            params,
    ) in where_params_def:
        if value:
            where_parts.append(where_part)
            where_params = where_params + params
    where_clause = ''
    if where_parts:
        where_clause = 'WHERE ' + '\n AND '.join(where_parts) + '\n'

    #########################################################
    # ORDER BY clause
    order_by_params = tuple()
    order_by_parts = list()
    for order_by_part in order_by_list:
        order_by_parts.append(order_by_definition[order_by_part][0])
        order_by_params = order_by_params + order_by_definition[order_by_part][1]
    # Stable tie-breakers for deterministic paging.
    order_by_parts.append('w.name ASC')
    order_by_parts.append('p.name ASC')
    order_by_clause = 'ORDER BY ' + ', '.join(order_by_parts)

    #########################################################
    # Pagination clause
    pagination_params = tuple()
    pagination_clause = ''
    if limit is not None:
        assert limit >= 0
        assert isinstance(limit, int)
        pagination_clause = pagination_clause + f' LIMIT {limit} '
    if offset is not None:
        assert offset >= 0
        assert isinstance(offset, int)
        pagination_clause = pagination_clause + f' OFFSET {offset} '

    #########################################################
    # Put it together
    sql_params = select_params + where_params + order_by_params + pagination_params
    select = select_clause + where_clause + order_by_clause + pagination_clause
    values = db_util.run_query(select, sql_params)

    # print(f'get_publication_infos:\n\nselect={select}\n\nsql_params={sql_params}\n\n&&&&&&&&&&&&&&&&&')

    infos = {(
        workspace_name,
        type,
        publication_name,
    ): {
        'id': id_publication,
        'name': publication_name,
        'title': title,
        'uuid': uuid,
        'type': type,
        'style_type': style_type,
        'updated_at': updated_at,
        'bounding_box': [xmin, ymin, xmax, ymax],
        'access_rights': {
            'read': can_read_users.split(','),
            'write': can_write_users.split(',')
        }
    } for id_publication, workspace_name, type, publication_name, title, uuid, style_type, updated_at, xmin, ymin, xmax, ymax, can_read_users, can_write_users, _ in values}

    if values:
        # full_count window value is identical on every returned row.
        total_count = values[0][-1]
    else:
        # Page is empty; count matching rows with the same filters separately.
        count_clause = f""" select count(*) AS full_count from {DB_SCHEMA}.workspaces w inner join {DB_SCHEMA}.publications p on p.id_workspace = w.id left join {DB_SCHEMA}.users u on u.id_workspace = w.id """
        sql_params = where_params
        select = count_clause + where_clause
        count = db_util.run_query(select, sql_params)
        total_count = count[0][-1]

    # 1-based inclusive range of returned items; (0, 0) when empty.
    if infos:
        start = offset + 1 if offset else 1
        content_range = (start, start + len(infos) - 1)
    else:
        content_range = (0, 0)
    result = {
        'items': infos,
        'total_count': total_count,
        'content_range': content_range,
    }
    return result