Example #1
async def _acquire_connection(
    engine: Engine = Depends(_get_db_engine),
) -> AsyncIterator[SAConnection]:
    logger.debug(
        "Acquiring pg connection from pool: pool size=%d, acquired=%d, free=%d, reserved=[%d, %d]",
        engine.size,
        engine.size - engine.freesize,
        engine.freesize,
        engine.minsize,
        engine.maxsize,
    )
    if engine.freesize <= 1:
        logger.warning(
            "Last or no pg connection in pool: pool size=%d, acquired=%d, free=%d, reserved=[%d, %d]",
            engine.size,
            engine.size - engine.freesize,
            engine.freesize,
            engine.minsize,
            engine.maxsize,
        )

    async with engine.acquire() as conn:
        yield conn

    logger.debug(
        "Released pg connection: pool size=%d, acquired=%d, free=%d, reserved=[%d, %d]",
        engine.size,
        engine.size - engine.freesize,
        engine.freesize,
        engine.minsize,
        engine.maxsize,
    )
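A minimal sketch of how the _get_db_engine dependency assumed above could be wired with aiopg and FastAPI. The DSN, the app.state attribute, and the event-handler names are assumptions, not part of the original example.

from aiopg.sa import Engine, create_engine
from fastapi import FastAPI, Request

app = FastAPI()

@app.on_event("startup")
async def _connect_db() -> None:
    # placeholder DSN; real code would read it from configuration
    app.state.engine = await create_engine(dsn="dbname=mydb user=postgres")

@app.on_event("shutdown")
async def _disconnect_db() -> None:
    app.state.engine.close()
    await app.state.engine.wait_closed()

def _get_db_engine(request: Request) -> Engine:
    return request.app.state.engine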
Example #2
    async def _get_repo(
        engine: Engine = Depends(_get_db_engine),
    ) -> AsyncGenerator[BaseRepository, None]:

        logger.debug(
            "Acquiring pg connection from pool: current=%d, free=%d, reserved=[%d, %d]",
            engine.size,
            engine.freesize,
            engine.minsize,
            engine.maxsize,
        )
        if engine.freesize <= 1:
            logger.warning(
                "Last or no pg connection in pool: current=%d, free=%d, reserved=[%d, %d]",
                engine.size,
                engine.freesize,
                engine.minsize,
                engine.maxsize,
            )

        async with engine.acquire() as conn:
            yield repo_type(conn)

        logger.debug(
            "Released pg connection: current=%d, free=%d, reserved=[%d, %d]",
            engine.size,
            engine.freesize,
            engine.minsize,
            engine.maxsize,
        )
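Note that repo_type is a free variable here, which suggests _get_repo is defined inside a dependency factory. A hedged sketch of what that enclosing factory might look like; the factory name and the repository class in the usage comment are assumptions.

from typing import AsyncGenerator, Callable, Type

def get_repository(repo_type: Type[BaseRepository]) -> Callable:
    async def _get_repo(
        engine: Engine = Depends(_get_db_engine),
    ) -> AsyncGenerator[BaseRepository, None]:
        # one pooled connection (and hence one repository) per request
        async with engine.acquire() as conn:
            yield repo_type(conn)

    return _get_repo

# hypothetical usage in a route:
#   repo: UsersRepository = Depends(get_repository(UsersRepository))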
Example #3
    async def save(self, db: Engine):
        """
        Create or update an order.

        If this order has not been persisted yet, inserts a new row and
        stores the generated key in self.uuid.
        Otherwise, updates the existing row with the order's data.
        """
        table = db.tables['orders']
        data = {
            'id_on_exchange': self.id_on_exchange,
            'status': self.status,
            'pair': self.pair,
            'side': self.side,
            'price': self.price,
            'base': self.base.amount,
            'quote': self.quote.amount,
            'exchange': self.exchange.name,
            'strategy': self.strategy,
            'created_at': self.created_at,
            'expired_at': self.expired_at,
            'executed_at': self.executed_at,
        }
        async with db.acquire() as conn:
            if not self.uuid:
                self.uuid = await conn.scalar(table.insert().values(**data))
            else:
                await conn.execute(table.update().where(
                    table.c.uuid == self.uuid).values(**data))
Example #4
async def _set_adjacency_in_pipeline_db(db_engine: Engine, project_id: str,
                                        dag_adjacency_list: Dict):
    # pylint: disable=no-value-for-parameter
    async with db_engine.acquire() as conn:
        # READ
        # get pipeline
        query = sa.select([comp_pipeline]).where(
            comp_pipeline.c.project_id == project_id)
        result = await conn.execute(query)
        pipeline = await result.first()

        # WRITE
        if pipeline is None:
            # create pipeline
            log.debug("No pipeline for project %s, creating one", project_id)
            query = comp_pipeline.insert().values(
                project_id=project_id,
                dag_adjacency_list=dag_adjacency_list,
                state=UNKNOWN,
            )
        else:
            # update pipeline
            log.debug("Found pipeline for project %s, updating it", project_id)
            query = (comp_pipeline.update().where(
                comp_pipeline.c.project_id == project_id).values(
                    dag_adjacency_list=dag_adjacency_list, state=UNKNOWN))

        await conn.execute(query)
Example #5
async def inspect(
    # pylint: disable=too-many-arguments
    db_engine: Engine,
    rabbit_mq: RabbitMQ,
    job_request_id: str,
    user_id: str,
    project_id: str,
    node_id: Optional[str],
) -> Optional[List[str]]:
    log.debug(
        "ENTERING inspect with user %s pipeline:node %s: %s",
        user_id,
        project_id,
        node_id,
    )

    task: Optional[RowProxy] = None
    graph: Optional[nx.DiGraph] = None
    async with db_engine.acquire() as connection:
        pipeline: RowProxy = await _get_pipeline_from_db(
            connection, project_id)
        graph = execution_graph(pipeline)
        if not node_id:
            log.debug("NODE id was zero, this was the entry node id")
            return find_entry_point(graph)
        task = await _try_get_task_from_db(connection, graph, job_request_id,
                                           project_id, node_id)

    if not task:
        log.debug("no task at hand, let's rest...")
        return

    # config nodeports
    node_ports.node_config.USER_ID = user_id
    node_ports.node_config.NODE_UUID = task.node_id
    node_ports.node_config.PROJECT_ID = task.project_id

    # now proceed to actually run the task (we do this after the db session has been closed)
    # try to run the task; return an empty list of next nodes if anything goes wrong
    run_result = FAILED
    next_task_nodes = []
    try:
        executor = Executor(
            db_engine=db_engine,
            rabbit_mq=rabbit_mq,
            task=task,
            user_id=user_id,
        )
        await executor.run()
        next_task_nodes = list(graph.successors(node_id))
        run_result = SUCCESS
    except asyncio.CancelledError:
        log.warning("Task has been cancelled")
        raise

    finally:
        await _set_task_status(db_engine, project_id, node_id, run_result)

    return next_task_nodes
Example #6
async def raise_if_not_responsive(engine: Engine):
    async with engine.acquire() as conn:
        # pylint: disable=protected-access

        # NOTE: Hacks aiopg.sa.SAConnection interface
        #       to override connection's cursor timeout
        cursor = await conn._open_cursor()
        await cursor.execute("SELECT 1 as is_alive", timeout=1)
Example #7
async def check_credentials(engine: Engine, email: str, password: str) -> bool:
    async with engine.acquire() as conn:
        query = users.select().where(
            sa.and_(users.c.email == email,
                    users.c.status != UserStatus.BANNED))
        ret = await conn.execute(query)
        user = await ret.fetchone()
        if user is not None:
            return check_password(password, user["password_hash"])
    return False
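A hedged usage sketch of check_credentials in an aiohttp login handler; the request body shape and the "db_engine" app key are assumptions.

from aiohttp import web

async def login(request: web.Request) -> web.Response:
    body = await request.json()
    engine = request.app["db_engine"]  # assumed app key
    if await check_credentials(engine, body["email"], body["password"]):
        return web.json_response({"status": "ok"})
    raise web.HTTPUnauthorized(reason="invalid credentials")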
Example #8
async def get_all_categories(engine: Engine):

    table: Table = Category.__table__

    async with engine.acquire() as conn:
        async with conn.begin():

            result = await conn.execute(table.select())
            output = resultproxy_to_dict(result)
    return output
Example #9
async def _set_task_status(db_engine: Engine, project_id: str, node_id: str,
                           run_result):
    async with db_engine.acquire() as connection:
        await connection.execute(
            # FIXME: E1120:No value for argument 'dml' in method call
            # pylint: disable=E1120
            comp_tasks.update().where(
                and_(
                    comp_tasks.c.node_id == node_id,
                    comp_tasks.c.project_id == project_id,
                )).values(state=run_result, end=datetime.utcnow()))
Example #10
    async def delete(self, db: Engine):
        """
        Delete the current order from the database.

        Raises ValueError if the order has no database id (uuid).
        """
        if not self.uuid:
            raise ValueError(f'Invalid order UUID: {self.uuid}')

        table = db.tables['orders']

        async with db.acquire() as conn:
            await conn.execute(table.delete().where(table.c.uuid == self.uuid))
        self.uuid = None
Example #11
async def get_entity_by_id(engine: Engine, entity_id: int):

    table: Table = Entity.__table__

    async with engine.acquire() as conn:
        async with conn.begin():

            result = await conn.execute(
                table.select().where(table.c.id == entity_id))
            output = resultproxy_to_dict(result)

    if len(output) == 0:
        return None

    return output[0]
Example #12
async def _set_tasks_in_tasks_db(db_engine: Engine, project_id: str,
                                 tasks: Dict):
    async with db_engine.acquire() as conn:
        query = sa.select([ComputationalTask]).where(
            ComputationalTask.__table__.c.project_id == project_id)
        result = await conn.execute(query)
        tasks_db = await result.fetchall()
        # delete tasks that are no longer part of the project
        for task_db in tasks_db:
            if task_db.node_id not in tasks:
                query = ComputationalTask.__table__.delete().where(
                    and_(
                        ComputationalTask.__table__.c.project_id == project_id,
                        ComputationalTask.__table__.c.node_id == task_db.node_id,
                    ))
                await conn.execute(query)
        internal_id = 1
        for node_id, task in tasks.items():
            query = sa.select([ComputationalTask]).where(
                and_(
                    ComputationalTask.__table__.c.project_id == project_id,
                    ComputationalTask.__table__.c.node_id == node_id,
                ))
            result = await conn.execute(query)
            comp_task = await result.fetchone()
            if comp_task is None:
                # add a new one
                query = ComputationalTask.__table__.insert().values(
                    project_id=project_id,
                    node_id=node_id,
                    internal_id=internal_id,
                    image=task["image"],
                    schema=task["schema"],
                    inputs=task["inputs"],
                    outputs=task["outputs"],
                    submit=datetime.datetime.utcnow(),
                )
                internal_id += 1
            else:
                # update the existing one; only "file-picker" nodes get their
                # outputs replaced, other nodes keep the stored outputs
                query = ComputationalTask.__table__.update().where(
                    and_(
                        ComputationalTask.__table__.c.project_id == project_id,
                        ComputationalTask.__table__.c.node_id == node_id,
                    )).values(
                        job_id=None,
                        state=0,
                        image=task["image"],
                        schema=task["schema"],
                        inputs=task["inputs"],
                        outputs=task["outputs"]
                        if "file-picker" in task["image"]["name"]
                        else comp_task.outputs,
                        submit=datetime.datetime.utcnow(),
                    )
            await conn.execute(query)
Example #13
async def get_entity_by_category_name(engine: Engine, category_title: str):

    entity_table: Table = Entity.__table__
    category_table: Table = Category.__table__

    join = sa.join(entity_table, category_table,
                   entity_table.c.category_id == category_table.c.id)
    query = (sa.select([entity_table],
                       use_labels=False).select_from(join).where(
                           category_table.c.title == category_title))

    async with engine.acquire() as conn:
        async with conn.begin():

            result = await conn.execute(query)
            output = resultproxy_to_dict(result)

    return output
Example #14
async def _query_db(uid: str, engine: Engine) -> List[RowProxy]:
    async with engine.acquire() as conn:
        query = (sa.select(
            [
                users.c.email,
                users.c.role,
                users.c.name,
                users.c.primary_gid,
                groups.c.gid,
                groups.c.name,
                groups.c.description,
                groups.c.type,
            ],
            use_labels=True,
        ).select_from(
            users.join(
                user_to_groups.join(groups,
                                    user_to_groups.c.gid == groups.c.gid),
                users.c.id == user_to_groups.c.uid,
            )).where(users.c.id == uid).order_by(sa.asc(groups.c.name)))
        result = await conn.execute(query)
        return await result.fetchall()
Example #15
async def _set_adjacency_in_pipeline_db(db_engine: Engine, project_id: str,
                                        dag_adjacency_list: Dict):
    async with db_engine.acquire() as conn:
        query = sa.select([ComputationalPipeline]).where(
            ComputationalPipeline.__table__.c.project_id == project_id)
        result = await conn.execute(query)
        pipeline = await result.first()
        if pipeline is None:
            # no pipeline yet: create one
            query = ComputationalPipeline.__table__.insert().values(
                project_id=project_id,
                dag_adjacency_list=dag_adjacency_list,
                state=0,
            )
            log.debug("Pipeline object created")
        else:
            # pipeline found: update it
            log.debug("Pipeline object found")
            query = ComputationalPipeline.__table__.update().where(
                ComputationalPipeline.__table__.c.project_id == project_id
            ).values(
                state=0,
                dag_adjacency_list=dag_adjacency_list,
            )
        await conn.execute(query)
Example #16
async def _set_tasks_in_tasks_db(db_engine: Engine,
                                 project_id: str,
                                 tasks: Dict[str, Dict],
                                 replace_pipeline=True):
    # pylint: disable=no-value-for-parameter

    async def _task_already_exists(conn: SAConnection, project_id: str,
                                   node_id: str) -> bool:
        task_count: int = await conn.scalar(
            sa.select([sa.func.count()]).where(
                and_(
                    comp_tasks.c.project_id == project_id,
                    comp_tasks.c.node_id == node_id,
                )))
        assert task_count in (0, 1), f"Uniqueness violated: task_count={task_count}"  # nosec
        return task_count != 0

    async def _update_task(conn: SAConnection, task: Dict, project_id: str,
                           node_id: str) -> None:
        # update task's inputs/outputs
        io_update = {}
        task_inputs: str = await conn.scalar(
            sa.select([comp_tasks.c.inputs]).where(
                and_(
                    comp_tasks.c.project_id == project_id,
                    comp_tasks.c.node_id == node_id,
                )))
        # updates inputs
        if task_inputs != task["inputs"]:
            io_update["inputs"] = task["inputs"]

        # update outputs
        #  NOTE: update ONLY outputs of front-end nodes. The rest are
        #  updated by backend services (e.g. workers, interactive services)
        if task["outputs"] and task["node_class"] == NodeClass.FRONTEND:
            io_update["outputs"] = task["outputs"]

        if io_update:
            query = (comp_tasks.update().where(
                and_(
                    comp_tasks.c.project_id == project_id,
                    comp_tasks.c.node_id == node_id,
                )).values(**io_update))

            await conn.execute(query)

    # MAIN -----------

    async with db_engine.acquire() as conn:

        if replace_pipeline:
            # get project tasks already stored
            query = sa.select([comp_tasks]).where(
                comp_tasks.c.project_id == project_id)
            result = await conn.execute(query)
            tasks_rows = await result.fetchall()

            # prune database from invalid tasks
            for task_row in tasks_rows:
                if task_row.node_id not in tasks:
                    query = comp_tasks.delete().where(
                        and_(
                            comp_tasks.c.project_id == project_id,
                            comp_tasks.c.node_id == task_row.node_id,
                        ))
                    await conn.execute(query)

        internal_id = 1
        for node_id, task in tasks.items():

            is_new_task: bool = not await _task_already_exists(
                conn, project_id, node_id)
            try:
                if is_new_task:
                    # create task
                    query = comp_tasks.insert().values(
                        project_id=project_id,
                        node_id=node_id,
                        node_class=task["node_class"],
                        internal_id=internal_id,
                        image=task["image"],
                        schema=task["schema"],
                        inputs=task["inputs"],
                        outputs=task["outputs"] if task["outputs"] else {},
                        submit=datetime.datetime.utcnow(),
                    )

                    await conn.execute(query)
                    internal_id = internal_id + 1

            except psycopg2.errors.UniqueViolation:  # pylint: disable=no-member
                # avoids race condition
                is_new_task = False

            if not is_new_task:
                if replace_pipeline:
                    # replace task
                    query = (comp_tasks.update().where(
                        and_(
                            comp_tasks.c.project_id == project_id,
                            comp_tasks.c.node_id == node_id,
                        )).values(
                            job_id=None,
                            state=UNKNOWN,
                            node_class=task["node_class"],
                            image=task["image"],
                            schema=task["schema"],
                            inputs=task["inputs"],
                            outputs=task["outputs"] if task["outputs"] else {},
                            submit=datetime.datetime.utcnow(),
                        ))
                    await conn.execute(query)
                else:
                    await _update_task(conn, task, project_id, node_id)
Example #17
async def run_computational_task(
    # pylint: disable=too-many-arguments
    db_engine: Engine,
    rabbit_mq: RabbitMQ,
    job_request_id: str,
    user_id: str,
    project_id: str,
    node_id: str,
    retry: int,
    max_retries: int,
    sidecar_mode: BootMode,
) -> None:

    async with db_engine.acquire() as connection:
        task: Optional[RowProxy] = None
        graph: Optional[nx.DiGraph] = None
        try:
            log.debug(
                "ENTERING inspect with user %s pipeline:node %s: %s",
                user_id,
                project_id,
                node_id,
            )

            pipeline: RowProxy = await _get_pipeline_from_db(
                connection, project_id)
            graph = execution_graph(pipeline)
            log.debug("NODE id is %s, getting the task from DB...", node_id)
            task = await _try_get_task_from_db(connection, job_request_id,
                                               project_id, node_id)

            if not task:
                log.warning(
                    "Worker received task for user %s, project %s, node %s, but the task is already taken! this should not happen. going back to sleep...",
                    user_id,
                    project_id,
                    node_id,
                )
                return
        except asyncio.CancelledError:
            log.warning("Task has been cancelled")
            await _set_task_state(connection, project_id, node_id,
                                  StateType.ABORTED)
            raise

        run_result = StateType.FAILED
        try:
            await rabbit_mq.post_log_message(
                user_id,
                project_id,
                node_id,
                "[sidecar]Task found: starting...",
            )

            # now proceed to actually run the task (we do that after the db session has been closed)
            # try to run the task; return an empty list of next nodes if anything goes wrong
            executor = Executor(
                db_engine=db_engine,
                rabbit_mq=rabbit_mq,
                task=task,
                user_id=user_id,
                sidecar_mode=sidecar_mode,
            )
            await executor.run()
            run_result = StateType.SUCCESS
        except asyncio.CancelledError:
            log.warning("Task has been cancelled")
            run_result = StateType.ABORTED
            raise

        finally:
            await rabbit_mq.post_log_message(
                user_id,
                project_id,
                node_id,
                f"[sidecar]Task completed with result: {run_result.name} [Trial {retry+1}/{max_retries}]",
            )
            if (retry + 1) < max_retries and run_result == StateType.FAILED:
                # try again!
                run_result = StateType.PENDING
            await _set_task_state(connection, project_id, node_id, run_result)
            if run_result == StateType.FAILED:
                # set the successive tasks as ABORTED
                await _set_tasks_state(
                    connection,
                    project_id,
                    nx.bfs_tree(graph, node_id),
                    StateType.ABORTED,
                    offset=1,
                )
Example #18
async def db_connection(pg_engine: Engine) -> AsyncIterator[SAConnection]:
    """fixture for connection"""
    async with pg_engine.acquire() as conn:
        yield conn
Example #19
async def pg_connection(pool: Engine = Depends(pg_pool)) -> AsyncIterator[SAConnection]:
    async with pool.acquire() as connection:
        yield connection
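A hedged sketch of how the pg_connection dependency could be consumed in a route; the router, path, table, and query are illustrative assumptions.

from aiopg.sa import SAConnection
from fastapi import APIRouter, Depends

router = APIRouter()

@router.get("/users/count")
async def count_users(conn: SAConnection = Depends(pg_connection)):
    # aiopg.sa connections also accept plain SQL strings
    result = await conn.execute("SELECT count(*) AS n FROM users")
    row = await result.fetchone()
    return {"count": row.n}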
Example #20
async def delete_tables(engine: Engine, tables: list) -> None:
    """Deletes tables included in list."""
    async with engine.acquire() as conn:
        for table in reversed(tables):
            delete_table_stmt = DropTable(table.__table__)
            await conn.execute(delete_table_stmt)
Example #21
async def create_tables(engine: Engine, tables: list) -> None:
    """Creates all tables in tables list."""
    async with engine.acquire() as conn:
        for table in tables:
            create_table_stmt = CreateTable(table.__table__)
            await conn.execute(create_table_stmt)
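The create_tables/delete_tables helpers above expect objects exposing a __table__ attribute, i.e. declarative models. A minimal usage sketch, with a placeholder model and DSN (both assumptions):

import sqlalchemy as sa
from aiopg.sa import create_engine
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Category(Base):  # placeholder model
    __tablename__ = "categories"
    id = sa.Column(sa.Integer, primary_key=True)
    title = sa.Column(sa.String, nullable=False)

async def setup_schema() -> None:
    engine = await create_engine(dsn="dbname=mydb user=postgres")
    try:
        await create_tables(engine, [Category])
    finally:
        engine.close()
        await engine.wait_closed()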
Example #22
async def raise_if_not_responsive(engine: Engine):
    async with engine.acquire() as conn:
        await conn.execute("SELECT 1 as is_alive")
Example #23
async def get_cnx(engine: Engine = Depends(get_engine)):
    # TODO: problem here is retries??
    async with engine.acquire() as conn:
        yield conn
Example #24
async def select_shorts(db: Engine):
    async with db.acquire() as conn:
        conn: SAConnection
        result = await conn.execute(shorts_table.select())
        data = await result.fetchall()
        return data
Example #25
async def listen(app: web.Application, db_engine: Engine):
    listen_query = f"LISTEN {DB_CHANNEL_NAME};"
    _LISTENING_TASK_BASE_SLEEPING_TIME_S = 1
    async with db_engine.acquire() as conn:
        await conn.execute(listen_query)

        while True:
            # NOTE: instead of using await get() we check first if the connection was closed
            # since aiopg does not reset the await in such a case (if DB was restarted or so)
            # see aiopg issue: https://github.com/aio-libs/aiopg/pull/559#issuecomment-826813082
            if conn.closed:
                raise ConnectionError("connection with database is closed!")
            if conn.connection.notifies.empty():
                await asyncio.sleep(_LISTENING_TASK_BASE_SLEEPING_TIME_S)
                continue
            notification = conn.connection.notifies.get_nowait()
            log.debug("received update from database: %s",
                      pformat(notification.payload))
            # get the data and the info on what changed
            payload: Dict = json.loads(notification.payload)

            # FIXME: all this should move to rabbitMQ instead of this
            task_data = payload.get("data", {})
            task_changes = payload.get("changes", [])

            if not task_data:
                log.error("task data invalid: %s", pformat(payload))
                continue

            if not task_changes:
                log.error("no changes but still triggered: %s",
                          pformat(payload))
                continue

            project_uuid = task_data.get("project_id", "undefined")
            node_uuid = task_data.get("node_id", "undefined")

            # FIXME: we do not know who triggered these changes. we assume the user had the rights to do so
            # therefore we'll use the prj_owner user id. This should be fixed when the new sidecar comes in
            # and comp_tasks/comp_pipeline get deprecated.
            try:
                # find the user(s) linked to that project
                the_project_owner = await _get_project_owner(
                    conn, project_uuid)

                if any(f in task_changes for f in ["outputs", "run_hash"]):
                    new_outputs = task_data.get("outputs", {})
                    new_run_hash = task_data.get("run_hash", None)
                    await _update_project_outputs(
                        app,
                        the_project_owner,
                        project_uuid,
                        node_uuid,
                        new_outputs,
                        new_run_hash,
                    )

                if "state" in task_changes:
                    new_state = convert_state_from_db(task_data["state"]).value
                    await _update_project_state(app, the_project_owner,
                                                project_uuid, node_uuid,
                                                new_state)

            except projects_exceptions.ProjectNotFoundError as exc:
                log.warning(
                    "Project %s was not found and cannot be updated. Maybe was it deleted?",
                    exc.project_uuid,
                )
                continue
            except projects_exceptions.ProjectOwnerNotFoundError as exc:
                log.warning(
                    "Project owner of project %s could not be found, is the project valid?",
                    exc.project_uuid,
                )
                continue
            except projects_exceptions.NodeNotFoundError as exc:
                log.warning(
                    "Node %s of project %s not found and cannot be updated. Maybe was it deleted?",
                    exc.node_uuid,
                    exc.project_uuid,
                )
                continue
Example #26
async def _get_repo(
    engine: Engine = Depends(_get_db_engine),
) -> AsyncGenerator[BaseRepository, None]:
    async with engine.acquire() as conn:
        yield repo_type(conn)