Example #1
def adjust_prime_db_schema_for_fulltext_search():
    logger.info(f'    Alter DB prime schema for fulltext search')
    statement = f'''CREATE EXTENSION IF NOT EXISTS unaccent;
    drop index if exists {DB_SCHEMA}.title_tsv_idx;
    drop function if exists {DB_SCHEMA}.my_unaccent;

    CREATE FUNCTION {DB_SCHEMA}.my_unaccent(text) RETURNS tsvector LANGUAGE SQL IMMUTABLE AS 'SELECT to_tsvector(unaccent($1))';
    CREATE INDEX title_tsv_idx ON {DB_SCHEMA}.publications USING GIST ({DB_SCHEMA}.my_unaccent(title));
    '''

    db_util.run_statement(statement)
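
The migration above builds an expression index on my_unaccent(title), so it only helps queries that search through the same IMMUTABLE wrapper. A hedged illustration of such a lookup (the query below is an assumption for illustration, not Layman's actual search code):

# Hypothetical full-text lookup; it reuses the my_unaccent wrapper so the
# planner can use the GIST expression index created above.
FULLTEXT_SEARCH_SQL = f'''select p.id, p.title
from {DB_SCHEMA}.publications p
where {DB_SCHEMA}.my_unaccent(p.title) @@ plainto_tsquery(unaccent(%s));'''

# results = db_util.run_query(FULLTEXT_SEARCH_SQL, ('Ústí nad Labem', ))
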
Example #2
def set_bbox(workspace, publication_type, publication, bbox):
    query = f'''update {DB_SCHEMA}.publications set
    bbox = ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s ,%s))
    where type = %s
      and name = %s
      and id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s);'''
    params = bbox + (
        publication_type,
        publication,
        workspace,
    )
    db_util.run_statement(query, params)
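
Both examples above hand a SQL string with %s placeholders and a parameter tuple to db_util.run_statement / db_util.run_query, which keeps user-supplied values out of the SQL text. A minimal sketch of what such helpers might look like on top of psycopg2 (the DSN handling and function bodies here are assumptions, not Layman's actual db_util):

import os
import psycopg2

def run_statement(statement, params=None):
    # Hypothetical stand-in for db_util.run_statement: bind params, execute, commit.
    conn = psycopg2.connect(os.environ['PG_DSN'])  # DSN source is an assumption
    try:
        with conn, conn.cursor() as cur:  # `with conn` commits on success, rolls back on error
            cur.execute(statement, params)
    finally:
        conn.close()

def run_query(query, params=None):
    # Hypothetical stand-in for db_util.run_query: execute and return all rows.
    conn = psycopg2.connect(os.environ['PG_DSN'])
    try:
        with conn, conn.cursor() as cur:
            cur.execute(query, params)
            return cur.fetchall()
    finally:
        conn.close()
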
Example #3
def preserve_data_version_table():
    copy_table = f'''create table {DB_SCHEMA}.data_version_backup as table {DB_SCHEMA}.data_version;'''
    with app.app_context():
        db_util.run_statement(copy_table)
    yield
    copy_table_back = f'''
    DROP TABLE IF EXISTS {DB_SCHEMA}.data_version;
    create table {DB_SCHEMA}.data_version as table {DB_SCHEMA}.data_version_backup;
    DROP TABLE IF EXISTS {DB_SCHEMA}.data_version_backup;
    '''
    with app.app_context():
        db_util.run_statement(copy_table_back)
Example #4
def publications_constraint():
    drop_constraint = f'alter table {DB_SCHEMA}.publications drop constraint if exists con_style_type'
    with app.app_context():
        db_util.run_statement(drop_constraint)
    yield
    add_constraint = f"""DO $$ BEGIN
        alter table {DB_SCHEMA}.publications add constraint con_style_type
    check (type = 'layman.map' or style_type is not null);
    EXCEPTION
        WHEN duplicate_object THEN null;
    END $$;"""
    with app.app_context():
        db_util.run_statement(add_constraint)
Example #5
def alter_schema():
    logger.info(f'    Starting - alter DB prime schema')
    db_schema = settings.LAYMAN_PRIME_SCHEMA
    add_column = f'''
DO $$ BEGIN
    CREATE TYPE {db_schema}.enum_style_type AS ENUM ('sld', 'qml');
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;
    ALTER TABLE {db_schema}.publications ADD COLUMN IF NOT EXISTS
    style_type {db_schema}.enum_style_type;'''
    db_util.run_statement(add_column)
    logger.info(f'    DONE - alter DB prime schema')
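
PostgreSQL supports ADD COLUMN IF NOT EXISTS, but there is no IF NOT EXISTS variant for CREATE TYPE or ADD CONSTRAINT, which is why these migrations wrap those statements in a DO block that swallows duplicate_object. The general, re-runnable shape of the pattern (schema, type, and values below are placeholders, not Layman's):

# Hypothetical idempotent enum creation; safe to run repeatedly.
IDEMPOTENT_CREATE_TYPE = '''
DO $$ BEGIN
    CREATE TYPE some_schema.some_enum AS ENUM ('value_a', 'value_b');
EXCEPTION
    WHEN duplicate_object THEN null;  -- the type already exists, ignore the error
END $$;
'''
# db_util.run_statement(IDEMPOTENT_CREATE_TYPE)
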
Example #6
def remove_rights(id_publication, users_list, right_type):
    sql = f'''delete from {DB_SCHEMA}.rights
where id_publication = %s
  and type = %s
  and id_user = (select u.id
                 from {DB_SCHEMA}.users u inner join
                      {DB_SCHEMA}.workspaces w on w.id = u.id_workspace
                 where w.name = %s);'''
    for username in users_list:
        db_util.run_statement(sql, (
            id_publication,
            right_type,
            username,
        ))
Example #7
def update_style_type_in_db():
    logger.info(f'    Starting - fulfill style type column in DB')
    db_schema = settings.LAYMAN_PRIME_SCHEMA

    update_layers = f"""update {db_schema}.publications set style_type = 'sld' where type = 'layman.layer'"""
    db_util.run_statement(update_layers)
    add_constraint = f"""DO $$ BEGIN
    alter table {db_schema}.publications add constraint con_style_type
check (type = 'layman.map' or style_type is not null);
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;"""
    db_util.run_statement(add_constraint)

    logger.info(f'    DONE - fulfill style type column in DB')
Example #8
def delete_publication(workspace_name, type, name):
    workspace_info = workspaces.get_workspace_infos(workspace_name).get(workspace_name)
    if workspace_info:
        id_publication = get_publication_infos(workspace_name, type).get((workspace_name, type, name), dict()).get("id")
        if id_publication:
            rights.delete_rights_for_publication(id_publication)
            id_workspace = workspace_info["id"]
            sql = f"""delete from {DB_SCHEMA}.publications p where p.id_workspace = %s and p.name = %s and p.type = %s;"""
            db_util.run_statement(sql, (id_workspace,
                                        name,
                                        type,))
        else:
            logger.warning(f'Deleting NON existing publication. workspace_name={workspace_name}, type={type}, pub_name={name}')
    else:
        logger.warning(f'Deleting publication for NON existing workspace. workspace_name={workspace_name}, type={type}, pub_name={name}')
Example #9
def set_bbox(workspace, publication_type, publication, bbox, crs, ):
    max_bbox = crs_def.CRSDefinitions[crs].max_bbox if crs else None
    cropped_bbox = (
        min(max(bbox[0], max_bbox[0]), max_bbox[2]),
        min(max(bbox[1], max_bbox[1]), max_bbox[3]),
        max(min(bbox[2], max_bbox[2]), max_bbox[0]),
        max(min(bbox[3], max_bbox[3]), max_bbox[1]),
    ) if not bbox_util.is_empty(bbox) and max_bbox else bbox
    srid = db_util.get_srid(crs)
    query = f'''update {DB_SCHEMA}.publications set
    bbox = ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s ,%s)),
    srid = %s
    where type = %s
      and name = %s
      and id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s);'''
    params = cropped_bbox + (srid, publication_type, publication, workspace,)
    db_util.run_statement(query, params)
Example #10
def test_adjust_prime_db_schema_for_bbox_search():
    workspace = 'test_adjust_prime_db_schema_for_bbox_search_workspace'
    layer = 'test_adjust_prime_db_schema_for_bbox_search_layer'
    map = 'test_adjust_prime_db_schema_for_bbox_search_map'

    expected_bbox_layer = test_data.SMALL_LAYER_BBOX
    expected_bbox_map = test_data.SMALL_MAP_BBOX

    process_client.publish_workspace_layer(workspace, layer)
    process_client.publish_workspace_map(workspace, map)
    with app.app_context():
        statement = f'ALTER TABLE {db_schema}.publications ALTER COLUMN bbox DROP NOT NULL;'
        db_util.run_statement(statement)
        statement = f'update {db_schema}.publications set bbox = null;'
        db_util.run_statement(statement)

        query = f'select p.id from {db_schema}.publications p where p.bbox is not null;'
        results = db_util.run_query(query)
        assert not results, results

        upgrade_v1_12.adjust_data_for_bbox_search()

        for publication_type, publication, expected_bbox in [
            ('layman.layer', layer, expected_bbox_layer),
            ('layman.map', map, expected_bbox_map)
        ]:
            query = f'''
            select ST_XMIN(p.bbox),
                   ST_YMIN(p.bbox),
                   ST_XMAX(p.bbox),
                   ST_YMAX(p.bbox)
            from {db_schema}.publications p inner join
                 {db_schema}.workspaces w on p.id_workspace = w.id
            where w.name = %s
              and p.type = %s
              and p.name = %s
            ;'''
            results = db_util.run_query(
                query, (workspace, publication_type, publication))
            assert len(results) == 1 and len(results[0]) == 4, results
            bbox = results[0]
            assert_util.assert_same_bboxes(bbox, expected_bbox, 0.000001)

    process_client.delete_workspace_layer(workspace, layer)
    process_client.delete_workspace_map(workspace, map)
Example #11
def test_adjust_prime_db_schema_for_last_change_search():
    workspace = 'test_adjust_prime_db_schema_for_last_change_search_workspace'
    layer = 'test_adjust_prime_db_schema_for_last_change_search_layer'
    map = 'test_adjust_prime_db_schema_for_last_change_search_map'

    timestamp1 = datetime.datetime.now(datetime.timezone.utc)
    process_client.publish_workspace_layer(workspace, layer)
    process_client.publish_workspace_map(workspace, map)
    timestamp2 = datetime.datetime.now(datetime.timezone.utc)
    with app.app_context():
        statement = f'ALTER TABLE {db_schema}.publications ALTER COLUMN updated_at DROP NOT NULL;'
        db_util.run_statement(statement)
        statement = f'update {db_schema}.publications set updated_at = null;'
        db_util.run_statement(statement)

        query = f'select p.id from {db_schema}.publications p where p.updated_at is not null;'
        results = db_util.run_query(query)
        assert not results, results

        upgrade_v1_12.adjust_data_for_last_change_search()

        query = f'''
select p.updated_at
from {db_schema}.publications p inner join
     {db_schema}.workspaces w on p.id_workspace = w.id
where w.name = %s
  and p.type = %s
  and p.name = %s
;'''
        results = db_util.run_query(query, (workspace, 'layman.layer', layer))
        assert len(results) == 1 and len(results[0]) == 1, results
        layer_updated_at = results[0][0]
        assert timestamp1 < layer_updated_at < timestamp2

        results = db_util.run_query(query, (workspace, 'layman.map', map))
        assert len(results) == 1 and len(results[0]) == 1, results
        map_updated_at = results[0][0]
        assert timestamp1 < map_updated_at < timestamp2

        assert layer_updated_at < map_updated_at

    process_client.delete_workspace_layer(workspace, layer)
    process_client.delete_workspace_map(workspace, map)
Example #12
def adjust_db_for_schema_migrations():
    logger.info(f'    Alter DB prime schema for schema migrations')

    add_type = f'''
    DO $$ BEGIN
        CREATE TYPE {DB_SCHEMA}.enum_migration_type AS ENUM ('{consts.MIGRATION_TYPE_DATA}', '{consts.MIGRATION_TYPE_SCHEMA}');
    EXCEPTION
        WHEN duplicate_object THEN null;
    END $$;'''
    db_util.run_statement(add_type)
    add_column = f'''ALTER TABLE {DB_SCHEMA}.data_version ADD COLUMN IF NOT EXISTS
        migration_type {DB_SCHEMA}.enum_migration_type UNIQUE;'''
    db_util.run_statement(add_column)
    update_data = f'''update {DB_SCHEMA}.data_version set migration_type = '{consts.MIGRATION_TYPE_SCHEMA}';'''
    db_util.run_statement(update_data)
    insert_schema = f'''insert into {DB_SCHEMA}.data_version (major_version, minor_version, patch_version, migration, migration_type)
    values (-1, -1, -1, -1, '{consts.MIGRATION_TYPE_DATA}')'''
    db_util.run_statement(insert_schema)
    statement = f'ALTER TABLE {DB_SCHEMA}.data_version ALTER COLUMN migration_type SET NOT NULL;'
    db_util.run_statement(statement)
Example #13
def adjust_data_for_last_change_search():
    logger.info(f'    Starting - Set updated_at for all publications')
    query = f'''select p.id,
       w.name,
       p.type,
       p.name
from {DB_SCHEMA}.publications p inner join
     {DB_SCHEMA}.workspaces w on w.id = p.id_workspace
;'''
    publications = db_util.run_query(query)
    for (
            row_id,
            workspace,
            type,
            name,
    ) in publications:
        publ_dir = os.path.join(
            fs_util.get_workspaces_dir(),
            workspace,
            type.split('.')[1] + 's',
            name,
        )
        updated_at = None
        for root, _, files in os.walk(publ_dir):
            for file in files:
                file_updated_at = os.stat(os.path.join(root, file)).st_mtime
                updated_at = max(
                    updated_at,
                    file_updated_at) if updated_at else file_updated_at
        updated_at = datetime.datetime.fromtimestamp(updated_at, datetime.timezone.utc)\
            if updated_at else datetime.datetime.now(datetime.timezone.utc)

        update = f'update {DB_SCHEMA}.publications set updated_at = %s where id = %s;'
        db_util.run_statement(update, (
            updated_at,
            row_id,
        ))

    statement = f'ALTER TABLE {DB_SCHEMA}.publications ALTER COLUMN updated_at SET NOT NULL;'
    db_util.run_statement(statement)
    logger.info(f'    DONE - Set updated_at for all publications')
Example #14
def set_current_migration_version(migration_type, version):
    try:
        sql_insert = f'''update {DB_SCHEMA}.data_version set
            major_version = %s,
            minor_version = %s,
            patch_version = %s,
            migration = %s
        where migration_type = '{migration_type}';'''
        db_util.run_statement(sql_insert, version, encapsulate_exception=False)
    except psycopg2.errors.UndefinedColumn:  # pylint: disable=no-member
        assert migration_type == consts.MIGRATION_TYPE_SCHEMA

        sql_select = f'''select count(*) from {DB_SCHEMA}.data_version;'''
        sql_result = db_util.run_query(sql_select)
        assert sql_result[0][0] == 1

        sql_insert = f'''update {DB_SCHEMA}.data_version set
            major_version = %s,
            minor_version = %s,
            patch_version = %s,
            migration = %s;'''
        db_util.run_statement(sql_insert, version)
Example #15
def test_schema():
    with app.app_context():
        run_statement(model.DROP_SCHEMA_SQL)
        ensure_schema(settings.LAYMAN_PRIME_SCHEMA, )

        workspaces = run_query(f'select count(*) from {DB_SCHEMA}.workspaces;')
        assert workspaces[0][0] == len(util.get_workspaces())
        users = run_query(f'select count(*) from {DB_SCHEMA}.users;')
        assert users[0][0] == len(util.get_usernames(use_cache=False))
        user_infos = workspaces_util.get_workspace_infos(USERNAME)
        assert USERNAME in user_infos
        select_publications = f"""with const as (select %s workspace_name)
select w.name as workspace_name,
       p.type,
       p.name
from const c inner join
     {DB_SCHEMA}.workspaces w on w.name = c.workspace_name inner join
     {DB_SCHEMA}.publications p on p.id_workspace = w.id left join
     {DB_SCHEMA}.users u on u.id_workspace = w.id
;"""
        pub_infos = run_query(select_publications, (USERNAME, ))
        assert (USERNAME, LAYER_TYPE, LAYERNAME) in pub_infos
        assert (USERNAME, MAP_TYPE, MAPNAME) in pub_infos
Example #16
def initialize_data_versioning():
    logger.info(f'    Starting - data versioning initialization')

    sql_create_table = f'''CREATE TABLE IF NOT EXISTS {DB_SCHEMA}.data_version
    (
        major_version integer not null,
        minor_version integer not null,
        patch_version integer not null,
        migration integer not null
    )
    TABLESPACE pg_default;'''
    db_util.run_statement(sql_create_table)

    # This table should hold exactly one row; at this point it should still be empty, otherwise something is wrong.
    sql_select_count = f'''select count(*) from {DB_SCHEMA}.data_version'''
    row_count = db_util.run_query(sql_select_count)[0][0]
    assert row_count == 0

    # Initialize the version record to 1.9.0 with migration number 0
    sql_insert = f'''insert into {DB_SCHEMA}.data_version (major_version, minor_version, patch_version, migration) values (1, 9, 0, 0);'''
    db_util.run_statement(sql_insert)

    logger.info(f'    DONE - data versioning initialization')
Example #17
def ensure_schema(db_schema):
    if not schema_exists():
        try:
            db_util.run_statement(model.CREATE_SCHEMA_SQL)
            db_util.run_statement(model.setup_codelists_data())
        except BaseException as exc:
            db_util.run_statement(model.DROP_SCHEMA_SQL,
                                  conn_cur=db_util.create_connection_cursor())
            raise exc
    else:
        logger.info(
            f"Layman DB schema already exists, schema_name={db_schema}")
Example #18
def adjust_prime_db_schema_for_bbox_search():
    logger.info(f'    Alter DB prime schema for search by bbox')
    statement = f'ALTER TABLE {DB_SCHEMA}.publications ADD COLUMN IF NOT EXISTS bbox box2d;'
    db_util.run_statement(statement)
Example #19
def delete_rights_for_publication(id_publication):
    sql = f'''delete from {DB_SCHEMA}.rights where id_publication = %s;'''
    db_util.run_statement(sql, (id_publication, ))
Example #20
def adjust_data_for_bbox_search():
    logger.info(f'    Starting - Set bbox for all publications')

    query = f'''
    select  w.name,
            p.type,
            p.name
    from {DB_SCHEMA}.publications p inner join
         {DB_SCHEMA}.workspaces w on w.id = p.id_workspace
    where p.type = %s
    '''
    params = (LAYER_TYPE, )
    publications = db_util.run_query(query, params)
    for (workspace, _, layer) in publications:
        logger.info(f'      Migrate layer {workspace}.{layer}')
        table_query = f'''
        SELECT count(*)
        FROM pg_tables
        WHERE schemaname = '{workspace}'
          AND tablename = '{layer}'
          AND tableowner = '{settings.LAYMAN_PG_USER}'
        ;'''
        cnt = db_util.run_query(table_query)[0][0]
        if cnt == 0:
            logger.warning(
                f'      Layer DB table not available, not migrating.')
            continue

        bbox_query = f'''
        select ST_Extent(l.wkb_geometry) bbox
        from {workspace}.{layer} l
        ;'''
        bbox = db_util.run_query(bbox_query)[0]
        set_layer_bbox_query = f'''
        update {DB_SCHEMA}.publications set
            bbox = %s
        where type = %s
          and name = %s
          and id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s);'''
        db_util.run_statement(set_layer_bbox_query, (
            bbox,
            LAYER_TYPE,
            layer,
            workspace,
        ))

    params = (MAP_TYPE, )
    publications = db_util.run_query(query, params)
    for (workspace, _, map) in publications:
        from layman.map.filesystem import input_file
        logger.info(f'      Migrate map {workspace}.{map}')
        map_json = input_file.get_map_json(workspace, map)
        bbox_4326 = float(map_json['extent'][0]), float(map_json['extent'][1]),\
            float(map_json['extent'][2]), float(map_json['extent'][3])
        query_transform = f'''
        with tmp as (select ST_Transform(ST_SetSRID(ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s, %s)), %s), %s) bbox)
        select st_xmin(bbox),
               st_ymin(bbox),
               st_xmax(bbox),
               st_ymax(bbox)
        from tmp
        ;'''
        params = bbox_4326 + (
            4326,
            3857,
        )
        try:
            bbox_3857 = db_util.run_query(query_transform,
                                          params,
                                          encapsulate_exception=False)[0]
        except psycopg2.errors.InternalError_:  # pylint: disable=no-member
            logger.warning(
                f'        Bounding box not transformed, so set to None. Bounding box in 4326: "{bbox_4326}"'
            )
            bbox_3857 = (None, None, None, None)

        set_map_bbox_query = f'''
        update {DB_SCHEMA}.publications set
            bbox = ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s ,%s))
        where type = %s
          and name = %s
          and id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s);'''
        params = bbox_3857 + (
            MAP_TYPE,
            map,
            workspace,
        )
        db_util.run_statement(set_map_bbox_query, params)

    logger.info(f'     DONE - Set bbox for all publications')
Example #21
def test_get_current_version(sql_command, expected_value, migration_type):
    with app.app_context():
        db_util.run_statement(sql_command)
        current_data_version = upgrade.get_current_version(migration_type)
        assert current_data_version == expected_value
Example #22
def migrate_users_and_publications():
    workspace_names = global_get_workspaces(use_cache=False)

    layer_context = {
        'sources_filter':
        'layman.layer.filesystem.uuid, layman.layer.filesystem.input_chunk, '
        'layman.layer.filesystem.input_file, layman.layer.filesystem.input_style, layman.layer.db.table, '
        'layman.layer.qgis.wms, layman.layer.geoserver.wfs, layman.layer.geoserver.wms, '
        'layman.layer.geoserver.sld, layman.layer.filesystem.thumbnail, layman.layer.micka.soap'
    }
    map_context = {
        'sources_filter':
        'layman.map.filesystem.uuid, layman.map.filesystem.input_file, layman.map.filesystem.thumbnail, '
        'layman.map.micka.soap'
    }

    for workspace_name in workspace_names:
        userinfo = get_authn_info(workspace_name)
        id_workspace = workspaces.ensure_workspace(workspace_name)
        if userinfo:
            # It is personal workspace
            iss_sub_infos = users.get_user_infos(
                iss_sub={
                    'issuer_id': userinfo["iss_id"],
                    'sub': userinfo["sub"]
                })
            if iss_sub_infos:
                username_in_conflict, iss_sub_info = iss_sub_infos.popitem()
                raise LaymanError(
                    f"Two workspaces are registered as private workspaces of the same user. To migrate successfully, "
                    f"choose which workspace should be the only private workspace of the user, delete authn.txt file "
                    f"from the other workspace, and restart layman. The other workspace becomes public.",
                    data={
                        'user': iss_sub_info,
                        'workspaces': [workspace_name, username_in_conflict]
                    })
            userinfo['issuer_id'] = userinfo['iss_id']
            users.ensure_user(id_workspace, userinfo)
            everyone_can_write = False
        else:
            # It is public workspace, so all publications are available to everybody
            everyone_can_write = True

        for (publ_type, infos_method,
             context) in [(LAYER_TYPE, get_layer_infos, layer_context),
                          (MAP_TYPE, get_map_infos, map_context)]:
            publications = infos_method(workspace_name)
            for name in publications:
                info = layman_util.get_publication_info(
                    workspace_name, publ_type, name, context)
                insert_publications_sql = f'''insert into {DB_SCHEMA}.publications as p
                        (id_workspace, name, title, type, uuid, everyone_can_read, everyone_can_write) values
                        (%s, %s, %s, %s, %s, %s, %s)
                returning id
                ;'''

                data = (
                    id_workspace,
                    name,
                    info.get("title", name),
                    publ_type,
                    info["uuid"],
                    True,
                    everyone_can_write,
                )
                db_util.run_statement(insert_publications_sql, data)
Example #23
def test_update_style_type_in_db():
    workspace = 'test_update_style_type_in_db_workspace'
    map = 'test_update_style_type_in_db_map'
    TestLayerDef = namedtuple('TestLayerDef', [
        'name',
        'style_file',
        'expected_style',
    ])
    layers = [
        TestLayerDef(
            'test_update_style_type_in_db_none_layer',
            '',
            'sld',
        ),
        TestLayerDef(
            'test_update_style_type_in_db_sld_layer',
            'sample/style/generic-blue_sld.xml',
            'sld',
        ),
        TestLayerDef(
            'test_update_style_type_in_db_sld110_layer',
            'sample/style/sld_1_1_0.xml',
            'sld',
        ),
        # This should not happen, because before this release it was not possible to upload QGIS files
        TestLayerDef(
            'test_update_style_type_in_db_qgis_layer',
            'sample/style/small_layer.qml',
            'sld',
        ),
    ]

    process_client.publish_workspace_map(workspace, map)
    for layer in layers:
        process_client.publish_workspace_layer(
            workspace,
            layer.name,
            style_file=layer.style_file,
        )

    set_column_null = f"""update {DB_SCHEMA}.publications set style_type = null"""

    with app.app_context():
        db_util.run_statement(set_column_null)
        map_info = map_util.get_map_info(workspace, map)
        assert map_info['style_type'] is None
        for layer in layers:
            layer_info = layer_util.get_layer_info(workspace, layer.name)
            assert layer_info['style_type'] is None

        upgrade_v1_10.update_style_type_in_db()

        map_info = map_util.get_map_info(workspace, map)
        assert map_info['style_type'] is None
        for layer in layers:
            layer_info = layer_util.get_layer_info(workspace, layer.name)
            assert layer_info['style_type'] == layer.expected_style

    process_client.delete_workspace_map(workspace, map)
    for layer in layers:
        process_client.delete_workspace_layer(
            workspace,
            layer.name,
        )
Example #24
def adjust_prime_db_schema_for_last_change_search():
    logger.info(f'    Alter DB prime schema for search by updated_at')
    statement = f'ALTER TABLE {DB_SCHEMA}.publications ADD COLUMN IF NOT EXISTS updated_at timestamp with time zone;'
    db_util.run_statement(statement)
Example #25
def delete_workspace(name):
    sql = f"delete from {DB_SCHEMA}.workspaces where name = %s;"
    db_util.run_statement(sql, (name, ))
Example #26
def test_bbox_crop():
    def assert_out_of_the_box_publications(expected_count):
        query = f'''select count(*)
        from {DB_SCHEMA}.publications p
        where st_xMin(p.bbox) < -20026376.39
           or st_yMin(p.bbox) < -20048966.10
           or st_xMax(p.bbox) > 20026376.39
           or st_yMax(p.bbox) > 20048966.10
        ;'''
        with app.app_context():
            cnt = db_util.run_query(query)
        assert cnt[0][0] == expected_count, cnt

    main_workspace = 'test_bbox_crop_workspace'

    publications = [
        (
            process_client.LAYER_TYPE,
            main_workspace,
            'test_bbox_crop_layer',
            {
                'file_paths': [
                    'sample/layman.layer/small_layer.cpg',
                    'sample/layman.layer/small_layer.dbf',
                    'sample/layman.layer/small_layer.prj',
                    'sample/layman.layer/small_layer.shp',
                    'sample/layman.layer/small_layer.shx',
                ],
            },
        ),
        (
            process_client.LAYER_TYPE,
            main_workspace,
            'test_bbox_crop_qml_layer',
            {
                'file_paths': [
                    'sample/layman.layer/small_layer.cpg',
                    'sample/layman.layer/small_layer.dbf',
                    'sample/layman.layer/small_layer.prj',
                    'sample/layman.layer/small_layer.shp',
                    'sample/layman.layer/small_layer.shx',
                ],
                'style_file':
                'sample/style/small_layer.qml'
            },
        ),
        (
            process_client.MAP_TYPE,
            main_workspace,
            'test_bbox_crop_map',
            dict(),
        ),
    ]
    for publication_type, workspace, publication, params in publications:
        process_client.publish_workspace_publication(publication_type,
                                                     workspace, publication,
                                                     **params)

    big_bbox = (
        -20026376.39 - 1,
        -20048966.10 - 1,
        20026376.39 + 1,
        20048966.10 + 1,
    )

    query = f'''update {DB_SCHEMA}.publications set
    bbox = ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s ,%s))
    where type = %s
      and name = %s
      and id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s);'''

    for publication_type, workspace, publication, _ in publications:
        params = big_bbox + (
            publication_type,
            publication,
            workspace,
        )
        with app.app_context():
            db_util.run_statement(query, params)

    assert_out_of_the_box_publications(len(publications))

    with app.app_context():
        upgrade_v1_14.crop_bbox()

    assert_out_of_the_box_publications(0)

    for publication_type, workspace, publication, _ in publications:
        if publication_type == process_client.LAYER_TYPE:
            assert_util.assert_all_sources_bbox(workspace, publication, (
                -20026376.39,
                -20048966.10,
                20026376.39,
                20048966.10,
            ))

    for publication_type, workspace, publication, _ in publications:
        process_client.delete_workspace_publication(publication_type,
                                                    workspace, publication)
Example #27
File: users.py  Project: index-git/layman
def delete_user(username):
    sql = f"delete from {DB_SCHEMA}.users where id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s);"
    deleted = db_util.run_statement(sql, (username, ))
    if deleted:
        workspaces.delete_workspace(username)
Example #28
def crop_bbox():
    logger.info(f'    Crop bounding boxes')
    query = f'''select w.name,
    p.type,
    p.name,
    ST_XMIN(p.bbox) as xmin,
    ST_YMIN(p.bbox) as ymin,
    ST_XMAX(p.bbox) as xmax,
    ST_YMAX(p.bbox) as ymax
from {DB_SCHEMA}.publications p inner join
     {DB_SCHEMA}.workspaces w on w.id = p.id_workspace
where st_xMin(p.bbox) < -20026376.39
   or st_yMin(p.bbox) < -20048966.10
   or st_xMax(p.bbox) > 20026376.39
   or st_yMax(p.bbox) > 20048966.10
;'''
    publications = db_util.run_query(query)
    for workspace, publ_type, publication, xmin, ymin, xmax, ymax in publications:
        info = layman_util.get_publication_info(workspace, publ_type, publication, context={'keys': ['style_type', 'file', 'uuid',
                                                                                                     'native_crs', ], })

        original_bbox = (xmin, ymin, xmax, ymax)
        cropped_bbox = (
            max(original_bbox[0], -20026376.39),
            max(original_bbox[1], -20048966.10),
            min(original_bbox[2], 20026376.39),
            min(original_bbox[3], 20048966.10),
        )
        query = f'''update {DB_SCHEMA}.publications set
        bbox = ST_MakeBox2D(ST_Point(%s, %s), ST_Point(%s ,%s))
        where type = %s
          and name = %s
          and id_workspace = (select w.id from {DB_SCHEMA}.workspaces w where w.name = %s);'''
        params = cropped_bbox + (publ_type, publication, workspace,)
        db_util.run_statement(query, params)

        if publ_type == LAYER_TYPE:
            file_type = info['file']['file_type']
            assert file_type == settings.FILE_TYPE_VECTOR
            crs = info['native_crs']

            #  WFS
            bbox = geoserver.get_layer_bbox(workspace, publication)
            gs_util.patch_feature_type(workspace, publication, auth=settings.LAYMAN_GS_AUTH, bbox=bbox, crs=crs)
            gs_wfs.clear_cache(workspace)

            #  WMS
            style_type = info['style_type']
            geoserver_workspace = gs_wms.get_geoserver_workspace(workspace)
            if style_type == 'sld':
                gs_util.patch_feature_type(geoserver_workspace, publication, auth=settings.LAYMAN_GS_AUTH, bbox=bbox, crs=crs)
            elif style_type == 'qml':
                qgis_wms.save_qgs_file(workspace, publication)
                gs_util.patch_wms_layer(geoserver_workspace, publication, auth=settings.LAYMAN_GS_AUTH, bbox=bbox, crs=crs)
            gs_wms.clear_cache(workspace)

            # Thumbnail
            layer_thumbnail.generate_layer_thumbnail(workspace, publication)

            # Micka soap
            layer_micka_soap.patch_layer(workspace, publication, metadata_properties_to_refresh=['extent'])

            md_props = layer_util.get_metadata_comparison(workspace, publication, cached=False)
            assert md_props['metadata_properties']['extent']['equal'], f'{md_props}'
        elif publ_type == MAP_TYPE:
            # Micka soap
            map_micka_soap.patch_map(workspace, publication, metadata_properties_to_refresh=['extent'])