Example #1
async def user_groups_ids(aiopg_engine: Engine) -> Iterator[List[int]]:
    """Inits groups table and returns group identifiers"""

    cols = ("gid", "name", "description", "type", "thumbnail", "inclusion_rules")
    data = [
        (34, "john.smith", "primary group for user", "PRIMARY", None, {}),
        (
            20001,
            "Team Black",
            "External testers",
            "STANDARD",
            "http://mib.org",
            {"email": "@(foo|testers|mib)+.(org|com)$"},
        ),
    ]
    # pylint: disable=no-value-for-parameter

    async with aiopg_engine.acquire() as conn:
        for row in data:
            # NOTE: The 'default' dialect with current database version settings does not support in-place multirow inserts
            stmt = groups.insert().values(**dict(zip(cols, row)))
            await conn.execute(stmt)

    gids = [
        1,
    ] + [items[0] for items in data]

    yield gids

    async with aiopg_engine.acquire() as conn:
        await conn.execute(services_meta_data.delete())
        await conn.execute(groups.delete().where(groups.c.gid.in_(gids[1:])))
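A minimal usage sketch (assuming the fixtures in this listing are registered with pytest-asyncio, which the page elides): the yielded list is the pre-existing group 1 plus the two inserted gids.

async def test_groups_seeded(user_groups_ids: List[int]):
    assert user_groups_ids == [1, 34, 20001]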
async def user_id(pg_engine: Engine) -> Iterator[int]:
    async with pg_engine.acquire() as conn:
        # a 'me' user
        uid = await conn.scalar(
            users.insert().values(**(random_user())).returning(users.c.id))
    yield uid
    # cleanup
    async with pg_engine.acquire() as conn:
        # delete the 'me' user
        await conn.execute(users.delete().where(users.c.id == uid))
async def output_file(user_id: int, project_id: str,
                      postgres_engine: Engine) -> Iterable[FileMetaData]:

    node_id = "fd6f9737-1988-341b-b4ac-0614b646fa82"

    # pylint: disable=no-value-for-parameter

    file = FileMetaData()
    file.simcore_from_uuid(f"{project_id}/{node_id}/filename.txt",
                           bucket_name="master-simcore")
    file.entity_tag = "df9d868b94e53d18009066ca5cd90e9f"
    file.user_name = "test"
    file.user_id = str(user_id)

    async with postgres_engine.acquire() as conn:
        stmt = (
            file_meta_data.insert()
            .values(**attr.asdict(file))
            .returning(literal_column("*")))
        result = await conn.execute(stmt)
        row = await result.fetchone()

        # hack: works around a defect by casting ids to str
        file.user_id = str(user_id)
        file.location_id = str(file.location_id)
        # --
        assert file == FileMetaData(**dict(row))  # type: ignore

        yield file

        result = await conn.execute(file_meta_data.delete().where(
            file_meta_data.c.file_uuid == row.file_uuid))
Example #4
async def login(*, db: Engine, credentials_data: dict, living_time: int,
                private_key: str) -> str:
    """
    Steps for authenticate user:
    1. check its credentials, by login and password, exists in database etc
    2. create refresh token
    3. create access token with link to refresh token
        link for refresh token needs to a later refresh and logout
    :param db: database engine
    :param credentials_data: dict user credentials
    :param living_time: token's living time (in sec.)
    :param private_key: private key for signature JWT
    :return: JWT for user
    """
    async with db.acquire() as conn:  # type: SAConnection
        user = await identity_user(conn=conn,
                                   credentials_data=credentials_data)

        refresh_token = await create_refresh_token(conn=conn, user=user)

        user_data_token = to_user_data_token(user)
        token = await create_access_token(user_data_token=user_data_token,
                                          refresh_token=refresh_token,
                                          living_time=living_time,
                                          private_key=private_key)
        return token
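An illustrative call site for login (the app["db"] wiring, credential payload, and PRIVATE_KEY constant are assumptions, not from the source):

token = await login(
    db=app["db"],              # aiopg.sa Engine (hypothetical wiring)
    credentials_data={"login": "john.smith", "password": "secret"},
    living_time=3600,          # access token lifetime in seconds
    private_key=PRIVATE_KEY,   # key used to sign the JWT
)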
Example #5
async def reenter_txn(pool: SAEngine, conn: SAConnection):
    if conn is None:
        async with pool.acquire() as conn, conn.begin():
            yield conn
    else:
        async with conn.begin_nested():
            yield conn
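A hedged usage sketch for reenter_txn, assuming it is wrapped as an async context manager (e.g. via contextlib.asynccontextmanager, which its yield-based body suggests): passing None acquires a fresh connection and transaction, while passing an existing connection nests a SAVEPOINT on it.

async def outer(pool: SAEngine):
    async with reenter_txn(pool, None) as conn:    # new connection + transaction
        await inner(pool, conn)

async def inner(pool: SAEngine, conn: SAConnection):
    async with reenter_txn(pool, conn) as same:    # reuses conn via begin_nested()
        assert same is conn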
Example #6
    async def top(
        cls,
        pg: Engine,
        phrase: str,
        from_date: str,
        to_date: str,
        top_count: int = 3,
    ):
        """Top Authors."""
        rows = []
        query = text("""
            SELECT json_build_object(
                'author_id', a.author_id,
                'counter', sum(a.counter)) as data
                FROM authors a JOIN query q ON a.query_id = q.id
                WHERE lower(q.phrase) = lower(:phrase)
                    AND a.published_at >= date(:from_date)
                    AND a.published_at <= date(:to_date)
                GROUP BY a.author_id
                ORDER BY sum(a.counter) DESC
                LIMIT :top_count
            """)
        async with pg.acquire() as conn:
            async for row in conn.execute(
                    query,
                    dict(
                        phrase=phrase,
                        from_date=from_date,
                        to_date=to_date,
                        top_count=top_count,
                    ),
            ):
                rows.append(row[0])
        return rows
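An assumed invocation (the owning model class, here called Author, is not shown in the listing):

top_authors = await Author.top(
    pg, phrase="python", from_date="2021-01-01", to_date="2021-12-31"
)
# -> up to top_count JSON objects like {"author_id": ..., "counter": ...}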
Example #7
async def project_id(user_id: int, postgres_engine: Engine) -> Iterable[UUID]:
    # inject a random project for the user into the db; this gives user_id full ownership of the project

    # pylint: disable=no-value-for-parameter
    stmt = (projects.insert().values(**random_project(
        prj_owner=user_id)).returning(projects.c.uuid))
    print(str(stmt))
    async with postgres_engine.acquire() as conn:
        result = await conn.execute(stmt)
        [prj_uuid] = (await result.fetchone()).as_tuple()

    yield UUID(prj_uuid)

    async with postgres_engine.acquire() as conn:
        await conn.execute(
            projects.delete().where(projects.c.uuid == prj_uuid))
async def create_cluster(
    pg_engine: Engine, faker: Faker
) -> AsyncIterable[Callable[..., Awaitable[int]]]:
    cluster_ids = []

    async def creator(**overrides) -> int:
        insert_values = {
            "name": "default cluster name",
            "type": ClusterType.ON_PREMISE,
            "description": None,
            "endpoint": faker.domain_name(),
            "authentication": faker.pydict(value_types=str),
        }
        insert_values.update(overrides)
        async with pg_engine.acquire() as conn:
            cluster_id = await conn.scalar(
                clusters.insert().values(**insert_values).returning(clusters.c.id)
            )
        cluster_ids.append(cluster_id)
        assert cluster_id
        return cluster_id

    yield creator

    # cleanup
    async with pg_engine.acquire() as conn:
        await conn.execute(clusters.delete().where(clusters.c.id.in_(cluster_ids)))
async def engine(pg_engine: Engine):
    # injects ...
    async with pg_engine.acquire() as conn:
        # a 'me' user
        user_id = await conn.scalar(users.insert().values(**random_user(
            name=USERNAME)).returning(users.c.id))
        # has a project 'parent'
        await conn.execute(projects.insert().values(
            **random_project(prj_owner=user_id, name=PARENT_PROJECT_NAME)))
    yield pg_engine
async def test_cannot_remove_owner_that_owns_cluster(
    pg_engine: Engine,
    user_id: int,
    user_group_id: int,
    create_cluster: Callable[..., Awaitable[int]],
):
    cluster_id = await create_cluster(owner=user_group_id)
    # now try removing the user
    async with pg_engine.acquire() as conn:
        with pytest.raises(ForeignKeyViolation):
            await conn.execute(users.delete().where(users.c.id == user_id))

    # now remove the cluster
    async with pg_engine.acquire() as conn:
        await conn.execute(clusters.delete().where(clusters.c.id == cluster_id))

    # removing the user should work now
    async with pg_engine.acquire() as conn:
        await conn.execute(users.delete().where(users.c.id == user_id))
Example #11
async def services_db_tables_injector(aiopg_engine: Engine) -> Callable:
    """Returns a helper function to init
    services_meta_data and services_access_rights tables

    Can use service_catalog_faker to generate inputs

    Example:
        await services_db_tables_injector(
            [
                service_catalog_faker(
                    "simcore/services/dynamic/jupyterlab",
                    "0.0.1",
                    team_access=None,
                    everyone_access=None,
                    product=target_product,
                ),
                service_catalog_faker(
                    "simcore/services/dynamic/jupyterlab",
                    "0.0.7",
                    team_access=None,
                    everyone_access=None,
                    product=target_product,
                ),
            ]
        )
    """

    # pylint: disable=no-value-for-parameter
    async def inject_in_db(fake_catalog: List[Tuple]):
        # [(service, ar1, ...), (service2, ar1, ...) ]

        async with aiopg_engine.acquire() as conn:
            # NOTE: The 'default' dialect with current database version settings does not support in-place multirow inserts
            for service in [items[0] for items in fake_catalog]:
                insert_meta = pg_insert(services_meta_data).values(**service)
                upsert_meta = insert_meta.on_conflict_do_update(
                    index_elements=[
                        services_meta_data.c.key,
                        services_meta_data.c.version,
                    ],
                    set_=service,
                )
                await conn.execute(upsert_meta)

            for access_rights in itertools.chain.from_iterable(
                    items[1:] for items in fake_catalog):
                stmt_access = services_access_rights.insert().values(
                    access_rights)
                await conn.execute(stmt_access)

    yield inject_in_db

    async with aiopg_engine.acquire() as conn:
        await conn.execute(services_access_rights.delete())
        await conn.execute(services_meta_data.delete())
Example #12
async def user_id(postgres_engine: Engine) -> Iterable[int]:
    # inject a random user in db

    # NOTE: Ideally this (and the next fixture) should be done via the webserver API, but at
    # this point in time the webserver service would pull in more dependencies on other
    # services, which would make this test too complex.

    # pylint: disable=no-value-for-parameter
    stmt = users.insert().values(**random_user(name="test")).returning(
        users.c.id)
    print(str(stmt))
    async with postgres_engine.acquire() as conn:
        result = await conn.execute(stmt)
        row = await result.fetchone()

    assert isinstance(row.id, int)
    yield row.id

    async with postgres_engine.acquire() as conn:
        await conn.execute(users.delete().where(users.c.id == row.id))
Example #13
async def logout(*, db: Engine, user_data_token: UserDataToken) -> None:
    """
    Steps for logout user:
    1. Delete refresh token form database
    Without refresh token user cannot do refresh
    """
    async with db.acquire() as conn:  # type: SAConnection
        try:
            await delete_refresh_token(conn=conn, id=user_data_token.jti)
        except app_exceptions.DoesNotExist:
            logger.debug('Logout: Refresh token does not exist')
            raise auth_exceptions.AuthenticateErrorRefreshToken
Example #14
async def user(pg_engine: Engine) -> RowProxy:
    # some user
    async with pg_engine.acquire() as conn:
        result: Optional[ResultProxy] = await conn.execute(
            users.insert().values(**random_user(
                name=USERNAME)).returning(users))
        assert result.rowcount == 1

        _user: Optional[RowProxy] = await result.first()
        assert _user
        assert _user.name == USERNAME
        return _user
async def test_creating_snapshots(engine: Engine, create_snapshot: Callable,
                                  exclude: Set):

    async with engine.acquire() as conn:
        # get parent
        res: ResultProxy = await conn.execute(
            projects.select().where(projects.c.name == PARENT_PROJECT_NAME))
        parent_prj: Optional[RowProxy] = await res.first()

        assert parent_prj

        # take one snapshot
        first_snapshot_id = await create_snapshot(0, parent_prj, conn)

        # modify parent
        updated_parent_prj = await (await conn.execute(
            projects.update().values(description="foo").where(
                projects.c.id == parent_prj.id).returning(projects))).first()

        assert updated_parent_prj
        assert updated_parent_prj.id == parent_prj.id
        assert updated_parent_prj.description != parent_prj.description
        assert updated_parent_prj.creation_date < updated_parent_prj.last_change_date

        # take another snapshot
        second_snapshot_id = await create_snapshot(1, updated_parent_prj, conn)

        second_snapshot = await (await conn.execute(
            projects_snapshots.select().where(
                projects_snapshots.c.id == second_snapshot_id))).first()

        assert second_snapshot
        assert second_snapshot.id != first_snapshot_id
        assert second_snapshot.created_at == updated_parent_prj.last_change_date

        # get project corresponding to first snapshot
        j = projects.join(projects_snapshots,
                          projects.c.uuid == projects_snapshots.c.project_uuid)
        selected_snapshot_project = await (await conn.execute(
            projects.select().select_from(j).where(
                projects_snapshots.c.id == second_snapshot_id))).first()

        assert selected_snapshot_project
        assert selected_snapshot_project.uuid == second_snapshot.project_uuid
        assert parent_prj.uuid == second_snapshot.parent_uuid

        def extract(t):
            return {k: t[k] for k in t if k not in exclude.union({"name"})}

        assert extract(selected_snapshot_project) == extract(
            updated_parent_prj)
async def scicrunch_orm(pg_engine: Engine) -> Iterator[BaseOrm[str]]:
    # This is a table without dependencies and therefore easy to use as a fixture
    class ScicrunchOrm(BaseOrm[str]):
        def __init__(self, connection: SAConnection):
            super().__init__(
                scicrunch_resources,
                connection,
                readonly={"creation_date", "last_change_date"},
                writeonce={"rrid"},
            )

    async with pg_engine.acquire() as conn:
        orm_obj = ScicrunchOrm(conn)
        yield orm_obj
async def test_access_rights_on_owned_project(user_id: int, project_id: UUID,
                                              postgres_engine: Engine):

    async with postgres_engine.acquire() as conn:

        access = await get_project_access_rights(conn, user_id,
                                                 str(project_id))
        assert access == AccessRights.all()

        # still NOT registered in file_meta_data BUT with prefix {project_id} owned by user
        access = await get_file_access_rights(
            conn, user_id,
            f"{project_id}/node_id/not-in-file-metadata-table.txt")
        assert access == AccessRights.all()
Example #18
    async def save(cls, pg: Engine, query: str) -> int:
        """Upsert by unique `phrase` row into table `query`.

        Return it's `id`. Upsert `on_conflict_do_nothing`
        do not return `id`, so we need fetch it.
        """
        async with pg.acquire() as conn:
            async for row in conn.execute(cls.upsert({"phrase": query})):
                if row:
                    return row[0]
            async for row in conn.execute(cls.get_by_phrasee(query)):
                return row[0]
        assert row
        return row[0]
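A hypothetical call, assuming the enclosing model class (say, Query) exposes save as a classmethod:

query_id = await Query.save(pg, "climate change")   # inserts once, later calls return the same id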
Example #19
async def project(pg_engine: Engine, user: RowProxy) -> RowProxy:
    # a user's project
    async with pg_engine.acquire() as conn:
        result: Optional[ResultProxy] = await conn.execute(
            projects.insert().values(
                **random_project(prj_owner=user.id,
                                 name=PARENT_PROJECT_NAME)).returning(projects)
        )
        assert result.rowcount == 1

        _project: Optional[RowProxy] = await result.first()
        assert _project
        assert _project.name == PARENT_PROJECT_NAME
        return _project
Example #20
async def raise_if_migration_not_ready(engine: Engine):
    """Ensures db migration is complete

    :raises DBMigrationError
    """
    async with engine.acquire() as conn:
        version_num = await conn.scalar(
            'SELECT "version_num" FROM "alembic_version"')
        head_version_num = get_current_head()
        if version_num != head_version_num:
            raise DBMigrationError(
                f"Migration is incomplete, expected {head_version_num} but got {version_num}"
            )
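A sketch of the intended startup wiring (the aiopg.sa create_engine call and the app container are illustrative, not from the source): fail fast before serving requests if the database schema lags behind the code.

async def setup_db(app):
    engine = await create_engine(app["config"]["postgres_dsn"])  # hypothetical config key
    await raise_if_migration_not_ready(engine)
    app["engine"] = engine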
Example #21
async def products_names(aiopg_engine: Engine) -> Iterator[List[str]]:
    """Inits products db table and returns product names"""
    data = [
        # already upon creation: ("osparc", r"([\.-]{0,1}osparc[\.-])"),
        ("s4l", r"(^s4l[\.-])|(^sim4life\.)|(^api.s4l[\.-])|(^api.sim4life\.)"),
        ("tis", r"(^tis[\.-])|(^ti-solutions\.)"),
    ]

    # pylint: disable=no-value-for-parameter

    async with aiopg_engine.acquire() as conn:
        # NOTE: The 'default' dialect with current database version settings does not support in-place multirow inserts
        for name, regex in data:
            stmt = products.insert().values(name=name, host_regex=regex)
            await conn.execute(stmt)

    names = [
        "osparc",
    ] + [items[0] for items in data]
    yield names

    async with aiopg_engine.acquire() as conn:
        await conn.execute(products.delete())
async def test_multiple_snapshots_of_same_project(engine: Engine,
                                                  create_snapshot: Callable):
    async with engine.acquire() as conn:
        # get parent
        res: ResultProxy = await conn.execute(
            projects.select().where(projects.c.name == PARENT_PROJECT_NAME))
        parent_prj: Optional[RowProxy] = await res.first()
        assert parent_prj

        # take first snapshot
        await create_snapshot(0, parent_prj, conn)

        # no changes in the parent!
        with pytest.raises(UniqueViolation):
            await create_snapshot(1, parent_prj, conn)
Example #23
async def check_credentials(db_engine: Engine, username: str,
                            password: str) -> bool:
    """Производит аутентификацию пользователя."""
    async with db_engine.acquire() as conn:
        where = and_(models.users.c.login == username, )

        query = models.users.select().where(where)
        result = await conn.execute(query)
        user = await result.fetchone()

        if user is not None:
            password_hash = user.passwd
            return sha256_crypt.verify(password, password_hash)

    return False
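An illustrative aiohttp handler on top of check_credentials (the handler name and request shape are assumptions; assumes `from aiohttp import web`):

async def handle_login(request: web.Request) -> web.Response:
    body = await request.json()
    ok = await check_credentials(request.app["db"], body["username"], body["password"])
    return web.json_response({"authenticated": ok}, status=200 if ok else 401)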
Example #24
async def test_operations_on_group_classifiers(pg_engine: Engine,
                                               classifiers_bundle: Dict):
    # NOTE: mostly for TDD
    async with pg_engine.acquire() as conn:

        # creates a group
        stmt = (groups.insert().values(**random_group(
            name="MyGroup")).returning(groups.c.gid))
        gid = await conn.scalar(stmt)

        # adds classifiers to a group
        stmt = (group_classifiers.insert().values(bundle=classifiers_bundle,
                                                  gid=gid).returning(
                                                      literal_column("*")))
        result = await conn.execute(stmt)
        row = await result.first()

        assert row[group_classifiers.c.gid] == gid
        assert row[group_classifiers.c.bundle] == classifiers_bundle

        # get bundle in one query
        bundle = await conn.scalar(
            sa.select([group_classifiers.c.bundle
                       ]).where(group_classifiers.c.gid == gid))
        assert bundle
        assert classifiers_bundle == bundle

        # Cannot add more than one classifier's bundle to the same group
        # pylint: disable=no-member
        with pytest.raises(psycopg2.errors.UniqueViolation):
            await conn.execute(group_classifiers.insert().values(bundle={},
                                                                 gid=gid))

        # deleting a group deletes the classifier
        await conn.execute(groups.delete().where(groups.c.gid == gid))

        # FIXME: count returns 1 but the db is empty!??
        groups_count = 0  # await conn.scalar(groups.count())
        classifiers_count = await conn.scalar(group_classifiers.count())

        assert groups_count == 0
        assert classifiers_count <= groups_count

        # no bundle
        bundle = await conn.scalar(
            sa.select([group_classifiers.c.bundle
                       ]).where(group_classifiers.c.gid == gid))
        assert bundle is None
Example #25
async def test_load_products(pg_engine: Engine, make_products_table,
                             product_sample):
    async with pg_engine.acquire() as conn:
        await make_products_table(conn)

        stmt = sa.select([products.c.name, products.c.host_regex])
        result: ResultProxy = await conn.execute(stmt)

        assert result.returns_rows

        rows: List[RowProxy] = await result.fetchall()

        assert {
            row[products.c.name]: row[products.c.host_regex]
            for row in rows
        } == product_sample
async def fake_scicrunch_ids(pg_engine: Engine) -> List[str]:
    row1 = dict(rrid="RRID:foo", name="foo", description="fooing")
    row2 = dict(rrid="RRID:bar", name="bar", description="barring")

    row_ids = []
    async with pg_engine.acquire() as conn:
        for row in (row1, row2):
            row_id = await conn.scalar(
                scicrunch_resources.insert()
                .values(**row)
                .returning(scicrunch_resources.c.rrid)
            )
            assert row_id, f"{row} failed"
            row_ids.append(row_id)

    return row_ids
Example #27
    async def unique_tweets(cls,
                            pg: Engine,
                            phrase: str,
                            count: int,
                            offset: int = 0):
        """Return unique tweets by phrase with limit/offset."""
        rows = []
        count = min([count, 100])
        query = text("""
            SELECT to_jsonb(t) FROM tweets t JOIN query q ON t.query_id = q.id
                WHERE lower(q.phrase) = lower(:phrase)
                ORDER BY t.published_at desc LIMIT :count OFFSET :offset
            """)
        async with pg.acquire() as conn:
            async for row in conn.execute(
                    query, dict(phrase=phrase, count=count, offset=offset)):
                rows.append(row[0])
        return rows
Example #28
    async def count_tweets(cls, pg: Engine, phrase: str, from_date: str,
                           to_date: str):
        """Count of tweets for given phrase and from_date/to_date."""
        rows = []
        query = text("""
            SELECT json_build_object('counter', count(t.id)) as data
                FROM tweets t JOIN query q ON t.query_id = q.id
                WHERE lower(q.phrase) = lower(:phrase)
                    AND t.published_at >= date(:from_date)
                    AND t.published_at <= date(:to_date)
            """)
        async with pg.acquire() as conn:
            async for row in conn.execute(
                    query,
                    dict(phrase=phrase, from_date=from_date, to_date=to_date),
            ):
                rows.append(row[0])
        return rows
async def engine(pg_engine: Engine):

    async with pg_engine.acquire() as conn:
        await conn.execute(users.insert().values(**random_user(name="A")))
        await conn.execute(users.insert().values(**random_user()))
        await conn.execute(users.insert().values(**random_user()))

        await conn.execute(
            projects.insert().values(**random_project(prj_owner=1)))
        await conn.execute(
            projects.insert().values(**random_project(prj_owner=2)))
        await conn.execute(
            projects.insert().values(**random_project(prj_owner=3)))
        with pytest.raises(ForeignKeyViolation):
            await conn.execute(
                projects.insert().values(**random_project(prj_owner=4)))

    yield pg_engine
Example #30
async def test_cluster_owner_has_all_rights(
    pg_engine: Engine,
    user_group_id: int,
    create_cluster: Callable[..., Awaitable[int]],
):
    cluster_id = await create_cluster(owner=user_group_id)

    async with pg_engine.acquire() as conn:
        result: ResultProxy = await conn.execute(
            cluster_to_groups.select().where(
                cluster_to_groups.c.cluster_id == cluster_id))

        assert result.rowcount == 1
        row = await result.fetchone()
        assert row is not None

        assert row.read is True
        assert row.write is True
        assert row.delete is True