async def engine(make_engine, loop):
    """Fixture: async engine over a freshly (re)created schema.

    Seeds 3 users and 3 projects (owned by users 1..3) and asserts that
    inserting a project with a non-existing owner (id=4) raises
    ForeignKeyViolation, i.e. the FK constraint is enforced.
    Yields the async engine; closes and awaits shutdown on teardown.
    """
    engine = await make_engine()
    # schema (re)creation goes through a synchronous engine
    sync_engine = make_engine(False)
    metadata.drop_all(sync_engine)
    metadata.create_all(sync_engine)

    async with engine.acquire() as conn:
        await conn.execute(users.insert().values(**random_user(name="A")))
        await conn.execute(users.insert().values(**random_user()))
        await conn.execute(users.insert().values(**random_user()))

        await conn.execute(
            projects.insert().values(**random_project(prj_owner=1)))
        await conn.execute(
            projects.insert().values(**random_project(prj_owner=2)))
        await conn.execute(
            projects.insert().values(**random_project(prj_owner=3)))
        # owner 4 was never inserted -> FK violation expected
        with pytest.raises(ForeignKeyViolation):
            await conn.execute(
                projects.insert().values(**random_project(prj_owner=4)))

    yield engine

    engine.close()
    await engine.wait_closed()
    # NOTE: removed dead code: a nested `async def start()` was defined here
    # after teardown and never called.
# Exemplo n.º 3 (0)
async def test_user_group_uniqueness(make_engine):
    """A (uid, gid) membership pair may exist only once in user_to_groups."""
    engine = await make_engine()
    sync_engine = make_engine(is_async=False)
    metadata.drop_all(sync_engine)
    metadata.create_all(sync_engine)

    async with engine.acquire() as conn:
        band = await _create_group(conn,
                                   name="Rory Storm and the Hurricanes")
        drummer = await _create_user(conn, "Ringo", band)

        # the helper already linked the user to the group, so re-inserting
        # the same (uid, gid) pair must hit the unique constraint
        with pytest.raises(UniqueViolation,
                           match="user_to_groups_uid_gid_key"):
            await conn.execute(user_to_groups.insert().values(
                uid=drummer.id, gid=band.gid))

        # mirrors simcore_service_webserver/groups_api.py:get_group_from_gid
        query = groups.select().where(groups.c.gid == band.gid)
        res: ResultProxy = await conn.execute(query)

        row: Optional[RowProxy] = await res.first()
        # attribute access and mapping access must agree
        assert row.type == row["type"]

        # first() closes the result proxy -> further fetches must fail
        with pytest.raises(aiopg.sa.exc.ResourceClosedError):
            await res.fetchone()
# Exemplo n.º 4 (0)
def migrated_pg_tables_context(
        postgres_config: Dict[str, Any]) -> Iterator[Dict[str, Any]]:
    """
    Within the context, tables are created and dropped
    using migration upgrade/downgrade routines
    """
    cfg = deepcopy(postgres_config)
    dsn = "postgresql://{user}:{password}@{host}:{port}/{database}".format(
        **postgres_config)
    cfg.update(dsn=dsn)

    simcore_postgres_database.cli.discover.callback(**postgres_config)
    simcore_postgres_database.cli.upgrade.callback("head")

    yield cfg

    # Downgrade back to zero. This step CANNOT be skipped: it would leave
    # the db in an invalid state, e.g. 'alembic_version' is not deleted and
    # keeps the head version, and routines like 'notify_comp_tasks_changed'
    # remain undeleted.
    simcore_postgres_database.cli.downgrade.callback("base")
    simcore_postgres_database.cli.clean.callback()  # just cleans discover cache

    # FIXME: migration downgrade fails to remove User types
    # SEE https://github.com/ITISFoundation/osparc-simcore/issues/1776
    # Added drop_all as tmp fix
    postgres_engine = sa.create_engine(cfg["dsn"])
    metadata.drop_all(bind=postgres_engine)
# Exemplo n.º 5 (0)
def apply_migration(postgres_service: Dict, make_engine) -> None:
    """Setup/teardown: migrate the db up to 'head', then back down to 'base'."""
    cli_kwargs = postgres_service.copy()
    cli_kwargs.pop("dsn")
    pg_cli.discover.callback(**cli_kwargs)
    pg_cli.upgrade.callback("head")

    yield

    pg_cli.downgrade.callback("base")
    pg_cli.clean.callback()
    # FIXME: deletes all because downgrade is not reliable!
    metadata.drop_all(make_engine(False))
# Exemplo n.º 6 (0)
def postgres_db(postgres_dsn: Dict[str, str],
                docker_stack: Dict) -> sa.engine.Engine:
    """Fixture: sync engine with all tables created; drops them on teardown."""
    dsn = DSN.format(**postgres_dsn)

    # block until the server accepts connections
    wait_till_postgres_is_responsive(dsn)

    # AUTOCOMMIT: each statement is committed immediately
    engine = sa.create_engine(dsn, isolation_level="AUTOCOMMIT")
    metadata.create_all(bind=engine, checkfirst=True)

    yield engine

    metadata.drop_all(engine)
    engine.dispose()
# Exemplo n.º 7 (0)
async def test_user_group_uniqueness(make_engine):
    """Re-inserting an existing (uid, gid) pair must raise UniqueViolation."""
    engine = await make_engine()
    sync_engine = make_engine(False)
    # start from a clean schema
    metadata.drop_all(sync_engine)
    metadata.create_all(sync_engine)
    async with engine.acquire() as conn:
        hurricanes = await _create_group(
            conn, name="Rory Storm and the Hurricanes")
        ringo = await _create_user(conn, "Ringo", hurricanes)
        # the helper already registered the membership, so this duplicates it
        duplicate = user_to_groups.insert().values(uid=ringo.id,
                                                   gid=hurricanes.gid)
        with pytest.raises(UniqueViolation,
                           match="user_to_groups_uid_gid_key"):
            await conn.execute(duplicate)
# Exemplo n.º 8 (0)
async def test_all_group(make_engine):
    """Checks the 'everyone' group: created with the schema, cannot be
    deleted, and every new user is automatically made a member of it.
    """
    engine = await make_engine()
    sync_engine = make_engine(is_async=False)
    metadata.drop_all(sync_engine)
    metadata.create_all(sync_engine)
    async with engine.acquire() as conn:
        # now check the only available group is the all group
        groups_count = await conn.scalar(
            select([func.count()]).select_from(groups))
        assert groups_count == 1

        result = await conn.execute(
            groups.select().where(groups.c.type == GroupType.EVERYONE))
        all_group_gid = (await result.fetchone()).gid
        assert all_group_gid == 1  # it's the first group so it gets a 1
        # try removing the all group
        with pytest.raises(RaiseException):
            await conn.execute(
                groups.delete().where(groups.c.gid == all_group_gid))

        # check adding a user is automatically added to the all group
        result = await conn.execute(users.insert().values(
            **random_user()).returning(literal_column("*")))
        user: RowProxy = await result.fetchone()

        result = await conn.execute(user_to_groups.select().where(
            user_to_groups.c.gid == all_group_gid))
        user_to_groups_row: RowProxy = await result.fetchone()
        assert user_to_groups_row.uid == user.id
        assert user_to_groups_row.gid == all_group_gid

        # try removing the all group
        with pytest.raises(RaiseException):
            await conn.execute(
                groups.delete().where(groups.c.gid == all_group_gid))

        # remove the user now
        await conn.execute(users.delete().where(users.c.id == user.id))
        users_count = await conn.scalar(
            select([func.count()]).select_from(users))
        assert users_count == 0

        # check the all group still exists
        groups_count = await conn.scalar(
            select([func.count()]).select_from(groups))
        assert groups_count == 1
        result = await conn.execute(
            groups.select().where(groups.c.type == GroupType.EVERYONE))
        all_group_gid = (await result.fetchone()).gid
        assert all_group_gid == 1  # it's the first group so it gets a 1
    # NOTE: removed dead code: a nested `async def start()` was defined here
    # at the end of the test and never called.
# Exemplo n.º 10 (0)
async def test_own_group(make_engine):
    """Each new user automatically gets a personal 'primary' group.

    Verifies that the primary group gid is set by the db after insertion,
    that the group cannot be deleted while the user exists (FK violation),
    and that deleting the user also removes the primary group and the
    membership rows.
    """
    engine = await make_engine()
    sync_engine = make_engine(is_async=False)
    # recreate a clean schema before the test
    metadata.drop_all(sync_engine)
    metadata.create_all(sync_engine)
    async with engine.acquire() as conn:
        result = await conn.execute(users.insert().values(
            **random_user()).returning(literal_column("*")))
        user: RowProxy = await result.fetchone()
        # the row returned by the INSERT itself has no primary_gid set yet
        assert not user.primary_gid

        # now fetch the same user that shall have a primary group set by the db
        result = await conn.execute(users.select().where(users.c.id == user.id)
                                    )
        user: RowProxy = await result.fetchone()
        assert user.primary_gid

        # now check there is a primary group
        result = await conn.execute(
            groups.select().where(groups.c.type == GroupType.PRIMARY))
        primary_group: RowProxy = await result.fetchone()
        assert primary_group.gid == user.primary_gid

        # exactly one group carries the user's primary gid
        groups_count = await conn.scalar(
            select([func.count(groups.c.gid)
                    ]).where(groups.c.gid == user.primary_gid))
        assert groups_count == 1

        relations_count = await conn.scalar(
            select([func.count()]).select_from(user_to_groups))
        assert relations_count == 2  # own group + all group

        # try removing the primary group
        with pytest.raises(ForeignKeyViolation):
            await conn.execute(
                groups.delete().where(groups.c.gid == user.primary_gid))

        # now remove the users should remove the primary group
        await conn.execute(users.delete().where(users.c.id == user.id))
        users_count = await conn.scalar(
            select([func.count()]).select_from(users))
        assert users_count == 0
        groups_count = await conn.scalar(
            select([func.count()]).select_from(groups))
        assert groups_count == 1  # the all group is still around
        relations_count = await conn.scalar(
            select([func.count()]).select_from(user_to_groups))
        assert relations_count == (users_count + users_count)
# Exemplo n.º 11 (0)
def apply_migration(postgres_service: Dict, make_engine) -> Iterator[None]:
    """Setup/teardown: migrate the db up to 'head', then back down to 'base'.

    NOTE: this is equivalent to
    packages/pytest-simcore/src/pytest_simcore/postgres_service.py::postgres_db
    but we do override postgres_dsn -> postgres_engine -> postgres_db because
    we want the latter fixture to have local scope
    """
    cli_kwargs = postgres_service.copy()
    cli_kwargs.pop("dsn")
    pg_cli.discover.callback(**cli_kwargs)
    pg_cli.upgrade.callback("head")

    yield

    pg_cli.downgrade.callback("base")
    pg_cli.clean.callback()
    # FIXME: deletes all because downgrade is not reliable!
    metadata.drop_all(make_engine(is_async=False))
async def engine(make_engine):
    """Fixture: async engine over a fresh schema pre-seeded with two
    computational pipelines ('PA' and 'PB'); closed on teardown."""
    engine = await make_engine()
    sync_engine = make_engine(is_async=False)
    metadata.drop_all(sync_engine)
    metadata.create_all(sync_engine)

    async with engine.acquire() as conn:
        for project_id in ("PA", "PB"):
            await conn.execute(comp_pipeline.insert().values(
                **fake_pipeline(project_id=project_id)))

    yield engine

    engine.close()
    await engine.wait_closed()
# Exemplo n.º 13 (0)
def postgres_db(
    postgres_dsn: Dict,
    postgres_engine: sa.engine.Engine,
) -> sa.engine.Engine:
    """Fixture: migrates the db up to 'head', yields the engine, and fully
    reverts (downgrade + drop_all) on teardown."""
    # upgrades database from zero
    kwargs = postgres_dsn.copy()
    pg_cli.discover.callback(**kwargs)
    pg_cli.upgrade.callback("head")

    yield postgres_engine

    # Downgrade back to zero. This step CANNOT be avoided: it would leave
    # the db in an invalid state, e.g. 'alembic_version' is not deleted and
    # keeps the head version, and routines like 'notify_comp_tasks_changed'
    # remain undeleted.
    pg_cli.downgrade.callback("base")
    pg_cli.clean.callback()  # just cleans discover cache

    # FIXME: migration downgrade fails to remove User types
    # SEE https://github.com/ITISFoundation/osparc-simcore/issues/1776
    # Added drop_all as tmp fix
    metadata.drop_all(postgres_engine)
# Exemplo n.º 14 (0)
async def test_group(make_engine):
    """Exercises group lifecycle: creation, rename, user removal, extra
    membership, and group deletion — checking the users/groups/user_to_groups
    row counts after each step.
    """
    engine = await make_engine()
    sync_engine = make_engine(False)
    # recreate a clean schema before the test
    metadata.drop_all(sync_engine)
    metadata.create_all(sync_engine)
    async with engine.acquire() as conn:
        rory_group = await _create_group(conn,
                                         name="Rory Storm and the Hurricanes")
        quarrymen_group = await _create_group(conn, name="The Quarrymen")
        await _create_user(conn, "John", quarrymen_group)
        await _create_user(conn, "Paul", quarrymen_group)
        await _create_user(conn, "Georges", quarrymen_group)
        pete = await _create_user(conn, "Pete", quarrymen_group)
        ringo = await _create_user(conn, "Ringo", rory_group)

        # rationale: following linux user/group system, each user has its own group (primary group) + whatever other group (secondary groups)
        # check DB contents
        users_count = await conn.scalar(users.count())
        assert users_count == 5
        groups_count = await conn.scalar(groups.count())
        assert groups_count == (
            users_count + 2 + 1
        )  # user primary groups, other groups, all group
        relations_count = await conn.scalar(user_to_groups.count())
        # each user belongs to primary + band + all group
        assert relations_count == (users_count + users_count + users_count)

        # change group name
        result = await conn.execute(
            groups.update().where(groups.c.gid == quarrymen_group.gid).values(
                name="The Beatles").returning(literal_column("*")))
        beatles_group = await result.fetchone()
        # the update must bump the 'modified' timestamp
        assert beatles_group.modified > quarrymen_group.modified

        # delete 1 user
        await conn.execute(users.delete().where(users.c.id == pete.id))

        # check DB contents
        users_count = await conn.scalar(users.count())
        assert users_count == 4
        groups_count = await conn.scalar(groups.count())
        assert groups_count == (users_count + 2 + 1)
        relations_count = await conn.scalar(user_to_groups.count())
        assert relations_count == (users_count + users_count + users_count)

        # add one user to another group
        await conn.execute(user_to_groups.insert().values(
            uid=ringo.id, gid=beatles_group.gid))

        # check DB contents
        users_count = await conn.scalar(users.count())
        assert users_count == 4
        groups_count = await conn.scalar(groups.count())
        assert groups_count == (users_count + 2 + 1)
        relations_count = await conn.scalar(user_to_groups.count())
        # one extra membership row from the insert above
        assert relations_count == (users_count + users_count + 1 + users_count)

        # delete 1 group
        await conn.execute(
            groups.delete().where(groups.c.gid == rory_group.gid))

        # check DB contents
        users_count = await conn.scalar(users.count())
        assert users_count == 4
        groups_count = await conn.scalar(groups.count())
        assert groups_count == (users_count + 1 + 1)
        relations_count = await conn.scalar(user_to_groups.count())
        assert relations_count == (users_count + users_count + users_count)

        # delete the other group
        await conn.execute(
            groups.delete().where(groups.c.gid == beatles_group.gid))

        # check DB contents
        users_count = await conn.scalar(users.count())
        assert users_count == 4
        groups_count = await conn.scalar(groups.count())
        assert groups_count == (users_count + 0 + 1)
        relations_count = await conn.scalar(user_to_groups.count())
        assert relations_count == (users_count + users_count)