Example 1
    def _get_user_query(cls,
                        guild_id=None,
                        channel_id=None,
                        user_id=None,
                        webhooks=False,
                        include_filtered=False):
        """Retrieves table, filtered by user id"""
        last_msg_at = func.max(UserChannel.last_msg_at).label("last_msg_at")
        total = func.sum(UserChannel.count).label("total")
        rank = func.dense_rank().over(
            order_by=[desc(total), asc(last_msg_at)]).label("rank")

        query = session.query(
            UserChannel.guild_id,
            UserChannel.guild_name,
            UserChannel.user_id,
            UserChannel.user_name,
            last_msg_at,
            total,
            rank,
        )
        query = cls._filter(
            query=query,
            guild_id=guild_id,
            channel_id=channel_id,
            user_id=user_id,
            webhooks=webhooks,
            include_filtered=include_filtered,
        )
        query = query.group_by(UserChannel.guild_id, UserChannel.guild_name,
                               UserChannel.user_id,
                               UserChannel.user_name).order_by("rank")

        return query
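
Every example on this page builds on the same core pattern: func.dense_rank() combined with .over() to produce a window expression. Below is a minimal self-contained sketch of that pattern, assuming SQLAlchemy 1.4+ and SQLite 3.25+; the Score model and sample rows are purely illustrative and not part of the example above.

from sqlalchemy import Column, Integer, String, create_engine, desc, func
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Score(Base):
    __tablename__ = "scores"
    id = Column(Integer, primary_key=True)
    player = Column(String)
    points = Column(Integer)


engine = create_engine("sqlite://")   # in-memory DB; SQLite >= 3.25 for window functions
Base.metadata.create_all(engine)
session = Session(bind=engine)

session.add_all([Score(player="a", points=10),
                 Score(player="b", points=10),
                 Score(player="c", points=7)])
session.commit()

# ties share a rank and no rank is skipped afterwards: a=1, b=1, c=2
rank = func.dense_rank().over(order_by=desc(Score.points)).label("rank")
for player, points, r in session.query(Score.player, Score.points, rank):
    print(player, points, r)
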
Example 2
def _create_split_insert_stmt(
        old_table,
        extracted_table,
        extracted_columns,
        remainder_table,
        remainder_columns,
        remainder_fk_name,
):
    SPLIT_ID = f"{constants.MATHESAR_PREFIX}_split_column_alias"
    extracted_column_names = [col.name for col in extracted_columns]
    remainder_column_names = [col.name for col in remainder_columns]
    split_cte = select(
        [
            old_table,
            func.dense_rank().over(order_by=extracted_columns).label(SPLIT_ID)
        ]
    ).cte()
    cte_extraction_columns = (
        [split_cte.columns[SPLIT_ID]]
        + [split_cte.columns[n] for n in extracted_column_names]
    )
    cte_remainder_columns = (
        [split_cte.columns[SPLIT_ID]]
        + [split_cte.columns[n] for n in remainder_column_names]
    )
    extract_sel = select(
        cte_extraction_columns,
        distinct=True
    )
    extract_ins_cte = (
        extracted_table
        .insert()
        .from_select([constants.ID] + extracted_column_names, extract_sel)
        .returning(literal(1))
        .cte()
    )
    remainder_sel = select(
        cte_remainder_columns,
        distinct=True
    ).where(exists(extract_ins_cte.select()))

    split_ins = (
        remainder_table
        .insert()
        .from_select(
            [remainder_fk_name] + remainder_column_names,
            remainder_sel
        )
    )
    return split_ins
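
A hedged call-site sketch for the statement builder above; the engine, the three table objects and the column/FK names are assumptions for illustration only.

# hypothetical call site: run the generated split INSERT in one transaction
stmt = _create_split_insert_stmt(
    old_table,
    extracted_table,
    [old_table.c.color, old_table.c.size],       # columns moved to the extracted table
    remainder_table,
    [old_table.c.name, old_table.c.price],       # columns kept on the remainder table
    "extracted_id",                              # FK column name on the remainder table
)
with engine.begin() as conn:
    conn.execute(stmt)
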
Example 3
def purge_instance_faults(session, meta, max_instance_faults):

    instance_faults_t = Table('instance_faults', meta, autoload=True)

    log.info("- purging instance faults down to at most %s per instance",
             max_instance_faults)
    # keep the max_instance_faults latest instance fault entries per instance
    # and delete all others
    subquery = session.query(
        instance_faults_t,
        func.dense_rank().over(
            order_by=instance_faults_t.c.created_at.desc(),
            partition_by=instance_faults_t.c.instance_uuid).label(
                'rank')).subquery()
    for i in session.query(subquery).filter(
            subquery.c.rank > max_instance_faults).all():
        log.info(
            "- action: deleting instance fault entry for instance %s from %s",
            i.instance_uuid, str(i.created_at))
        purge_instance_faults_q = instance_faults_t.delete().where(
            instance_faults_t.c.id == i.id)
        purge_instance_faults_q.execute()
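
A hedged wiring sketch for this function, assuming the pre-2.0 SQLAlchemy style it relies on: Table(..., autoload=True) and the bare .execute() on the delete statement both need a MetaData object bound to an engine.

# hypothetical wiring (SQLAlchemy < 2.0 style)
from sqlalchemy import MetaData, create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine("postgresql://nova:password@dbhost/nova")   # placeholder URL
meta = MetaData(bind=engine)
session = sessionmaker(bind=engine)()

purge_instance_faults(session, meta, max_instance_faults=5)
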
Example 4
from sqlalchemy import func
from sqlalchemy.orm import Query



process_stats_run_last_success_only = Query(
        [
            models.ProcessRun.process_id, 
            func.max(models.ProcessRun.end_date).label('max_end_date')
        ]
    ).filter(models.ProcessRun.success == True).group_by(models.ProcessRun.process_id).subquery('success_only')

process_stats_run_last_error_only = Query(
        [
            models.ProcessRun, 
            func.dense_rank().over(partition_by=[models.ProcessRun.process_id], order_by=[models.ProcessRun.create_date.desc()]).label('rank')
        ]
    ).filter(models.ProcessRun.success == False).subquery('errors_only')

process_stats_run_last_error_only_rank1 = Query(
        process_stats_run_last_error_only
    ).filter(
        process_stats_run_last_error_only.c.rank == 1
    ).subquery('last_error_only')

process_stats_run_last = Query(
        [
            models.ProcessRun,
            func.dense_rank().over(
                partition_by=[models.ProcessRun.process_id],
                order_by=[models.ProcessRun.create_date.desc()]
            ).label('rank')
        ]
    )
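
The module-level Query objects above are built without a session; a hedged sketch of binding one of the ranked subqueries to a live session at call time (the session variable is an assumption).

# hypothetical: bind the ranked subquery to a live session at call time
latest_failures = session.query(process_stats_run_last_error_only_rank1).all()
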
Example 5
    def create_baked_query(self, session: Session, opt=None) -> baked.Result:
        """
        Creates SqlAlchemy cached query according to OData filters and joins
        :param session: SqlAlchemy Session object
        :param opt: Options to apply before counting
        :return: SqlAlchemy Query Object
        """
        _baked_query = bakery(lambda s: s.query(self.cls))

        if self.fields:
            pk = inspect(self.cls).primary_key[0].key
            if getattr(self.cls, pk) not in self.fields:
                self.fields += (getattr(self.cls, pk), )
            _baked_query += lambda bq: bq.options(load_only(*self.fields))

        if self.extra_columns:
            _baked_query += lambda bq: bq.add_columns(*self.extra_columns)

        entities = (functools.reduce(lambda a, b: get_type(getattr(a, b)),
                                     x.split('/'), self.cls)
                    for x in self.join)
        self.filter += tuple(r for e in entities if hasattr(e, 'restrictions')
                             for r in getattr(e, 'restrictions'))

        self.join += tuple(e for e in self.expand if e not in self.join)

        for item in self.join:
            if item not in self.aliases:
                entity = functools.reduce(lambda c, p: get_type(getattr(c, p)),
                                          item.split('/'), self.cls)

                self.aliases[item] = aliased(entity,
                                             name=item.replace('/', '_'))

        for j in self.join:

            path = []
            chain = j.split('/')

            def drill_down_relationship(
                    query_entity: Tuple[BakedQuery, Entity],
                    attr_name: str) -> Tuple[BakedQuery, Entity]:

                query, entity = query_entity
                attr = getattr(entity, attr_name)
                path.append(attr_name)
                is_inner = inspect(entity).attrs[attr_name].innerjoin
                path_string = '/'.join(path)
                if path == chain:
                    return query + (lambda bq: bq.join(
                        self.aliases[path_string], attr, isouter=not is_inner)
                                    ), get_type(attr)
                if '/'.join(path) in self.join:
                    return query, get_type(attr)
                else:
                    return query + (lambda bq: bq.join(
                        attr, isouter=not is_inner)), get_type(attr)

            _baked_query, _ = functools.reduce(
                drill_down_relationship, chain,
                (_baked_query + (lambda bq: bq.reset_joinpoint()), self.cls))

        if self.filter:
            _baked_query += lambda bq: bq.filter(*self.filter)

        for x in self.expand:

            path = []
            chain = x.split('/')

            def drill_down_path(option_entity: Tuple[Any, Entity],
                                attr_name: str) -> Tuple[Any, Entity]:
                option, entity = option_entity
                attr = getattr(entity, attr_name)
                path.append(attr_name)

                path_string = '/'.join(path)
                if option is None:
                    if path_string in self.expand:
                        return contains_eager(
                            attr,
                            alias=self.aliases[path_string]), get_type(attr)
                    else:
                        return contains_eager(attr), get_type(attr)
                else:
                    if path_string in self.expand:
                        return option.contains_eager(
                            attr,
                            alias=self.aliases[path_string]), get_type(attr)
                    else:
                        return option.contains_eager(attr), get_type(attr)

            _options, _ = functools.reduce(drill_down_path, chain,
                                           (None, self.cls))

            if _options:
                _baked_query += lambda bq: bq.options(_options)

        if opt:
            _baked_query += lambda bq: opt(bq)

        if self.order_by:
            for field, order in self.order_by:

                if order == "desc":
                    _baked_query += lambda bq: bq.order_by(
                        self.get_order_by_field(field).desc())
                else:
                    _baked_query += lambda bq: bq.order_by(
                        self.get_order_by_field(field).asc())

        if self.use_row_number:
            pk = inspect(self.cls).primary_key[0].key
            if self.start or self.limit:
                row_number_column = func.dense_rank().over(
                    order_by=getattr(self.cls, pk)).label('row_number')
                _baked_query += lambda bq: bq.add_columns(
                    row_number_column).from_self(self.cls, *self.extra_columns)
                if self.start:
                    _baked_query += lambda bq: bq.filter(row_number_column >
                                                         self.start)
                if self.limit:
                    _baked_query += lambda bq: bq.filter(
                        row_number_column <= self.start + self.limit)
        else:
            if self.start:
                _baked_query += lambda bq: bq.offset(self.start)
            if self.limit:
                _baked_query += lambda bq: bq.limit(self.limit)

        return _baked_query(session).params(
            **{f'param_{k}': v
               for k, v in enumerate(self.param_list)})
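
A hedged call-site sketch; the builder instance name is an assumption. The returned baked Result supports the usual terminal methods such as all() and count().

# hypothetical call site; odata_query is an assumed, already configured builder
result = odata_query.create_baked_query(session)
total = result.count()
rows = result.all()
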
Example 6
    def create_query(self, session: Session, opt=None) -> Query:
        """
        Creates SqlAlchemy query according to OData filters and joins
        :param session: SqlAlchemy Session object
        :param opt: Options to apply before counting
        :return: SqlAlchemy Query Object
        """

        if self.fields:
            pk = inspect(self.cls).primary_key[0].key
            if getattr(self.cls, pk) not in self.fields:
                self.fields += (getattr(self.cls, pk), )
            _query = session.query(self.cls).options(load_only(*self.fields))
        else:
            _query = session.query(self.cls)

        if self.extra_columns:
            _query = _query.add_columns(*self.extra_columns)

        entities = {
            k: functools.reduce(lambda a, b: get_type(getattr(a, b)),
                                k.split('/'), self.cls)
            for k in self.join
        }

        self.filter += tuple(r for e in entities.values()
                             if hasattr(e, 'restrictions')
                             for r in e.restrictions)

        self.join += tuple(e for e in self.expand if e not in self.join)

        for item in self.join:
            if item not in self.aliases:
                entity = functools.reduce(lambda c, p: get_type(getattr(c, p)),
                                          item.split('/'), self.cls)

                self.aliases[item] = aliased(entity,
                                             name=item.replace('/', '_'))

        for j in self.join:

            path = []
            chain = j.split('/')

            def drill_down_relationship(
                    query_entity: Tuple[Query, Entity],
                    attr_name: str) -> Tuple[Query, Entity]:
                query, entity = query_entity
                attr = getattr(entity, attr_name)
                path.append(attr_name)
                path_string = '/'.join(path)
                is_inner = inspect(entity).attrs[attr_name].innerjoin
                if path == chain:
                    return query.join(self.aliases[path_string],
                                      attr,
                                      isouter=not is_inner), get_type(attr)
                elif '/'.join(path) in self.join:
                    return query, get_type(attr)
                else:
                    return query.join(attr,
                                      isouter=not is_inner), get_type(attr)

            _query, _ = functools.reduce(drill_down_relationship, chain,
                                         (_query.reset_joinpoint(), self.cls))

        if self.filter:
            _query = _query.filter(*self.filter)

        for x in self.expand:

            path = []
            chain = x.split('/')

            def drill_down_path(option_entity: Tuple[Any, Entity],
                                attr_name: str) -> Tuple[Any, Entity]:
                option, entity = option_entity
                attr = getattr(entity, attr_name)
                path.append(attr_name)

                path_string = '/'.join(path)
                if option is None:
                    if path_string in self.expand:
                        return contains_eager(
                            attr,
                            alias=self.aliases[path_string]), get_type(attr)
                    else:
                        return contains_eager(attr), get_type(attr)
                else:
                    if path_string in self.expand:
                        return option.contains_eager(
                            attr,
                            alias=self.aliases[path_string]), get_type(attr)
                    else:
                        return option.contains_eager(attr), get_type(attr)

            _options, _ = functools.reduce(drill_down_path, chain,
                                           (None, self.cls))

            if _options:
                _query = _query.options(_options)

        if opt:
            _query = opt(_query)

        if self.order_by:
            for field, order in self.order_by:

                if order == "desc":
                    _query = _query.order_by(
                        self.get_order_by_field(field).desc())
                else:
                    _query = _query.order_by(
                        self.get_order_by_field(field).asc())

        if self.use_row_number:
            pk = inspect(self.cls).primary_key[0].key
            if self.start or self.limit:
                row_number_column = func.dense_rank().over(
                    order_by=getattr(self.cls, pk)).label('row_number')
                _query = _query.add_columns(row_number_column).from_self(
                    self.cls, *self.extra_columns)
                if self.start:
                    _query = _query.filter(row_number_column > self.start)
                if self.limit:
                    _query = _query.filter(
                        row_number_column <= self.start + self.limit)

        else:
            if self.start:
                _query = _query.offset(self.start)
            if self.limit:
                _query = _query.limit(self.limit)

        if self.logger:
            query_statement = str(_query.statement)
            query_params = _query.statement.compile().params
            self.logger.debug(
                re.sub(r'\s?:(\w+)\s*', r" '{\1}' ",
                       query_statement).format(**query_params))

        return _query
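
Both variants page results by ranking rows over the primary key and wrapping the query with from_self(), so the rank can be filtered instead of relying on OFFSET/LIMIT. A standalone sketch of that idea, assuming a User model, a bound session, and SQLAlchemy 1.3/1.4 where Query.from_self() is still available.

from sqlalchemy import func

start, limit = 20, 10
row_number = func.dense_rank().over(order_by=User.id).label("row_number")

page = (
    session.query(User)
    .add_columns(row_number)                     # expose the window expression
    .from_self(User)                             # wrap it so the rank is filterable
    .filter(row_number > start,
            row_number <= start + limit)
    .order_by(row_number)
    .all()
)
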
Example 7
    def _ser(cls,
             to_return=None,
             filter_by=None,
             limit=None,
             offset=None,
             query=None,
             skip_nones=False,
             order_by=None,
             session=None,
             expose_all=False,
             params=None):
        """
                Prepare query and fields to fetch obtain (from it)
                The query only fetches necessary fields.
            :param to_return: list of fields to return
            :param filter_by: dict of SQLAlchemy clause to filter by
            :param limit: maximum amount of objects fetched
            :param offset: offset value for the result
            :param query: optional base query
            :param skip_nones: Skip filter_by entries that have a "None" value
            :param order_by: enforce result ordering, multiple via tuple
            :param session: Explict session to use for query
            :param expose_all: Whether to Return not exposed fields
            :param params: Query parameters
            :return: tuple(query, json_to_serialize)
        """
        assert params is None or isinstance(params, dict)
        assert to_return is None or isinstance(to_return, (list, tuple))

        if to_return is None:
            assert isinstance(cls.default_serialization, tuple)
            to_return = list(cls.default_serialization)

        assert len(to_return) == len(
            set(to_return)), [x for x in to_return if to_return.count(x) > 1]

        # expand relationships to default fields
        expanded = []
        for path in to_return:
            expanded += cls.expand(path)
        to_return = expanded

        # remove not exposed columns
        if expose_all is not True:
            to_return = list(filter(cls._is_exposed_column, to_return))

        # todo: should only expire column that use param
        # remove duplicated and store so we know what to populate
        json_to_populate = list(set(to_return))
        # obtain all columns that need fetching from db
        to_fetch = list(set(cls._get_query_columns(to_return)))

        if query is None:
            query = cls.query
        if session is not None:
            query = query.with_session(session)
        if params is not None:
            query = query.params(**params)
        # ensure that fresh data is loaded
        query = query.populate_existing()
        if filter_by is not None:
            query = cls.filter(filter_by, query, skip_nones=skip_nones)

        # handle consistent ordering and tuple in all cases
        if order_by is None:
            order_by = cls.id
        if isinstance(order_by, tuple):
            if order_by[-1] != cls.id:
                order_by = order_by + (cls.id, )
        else:
            if order_by != cls.id:
                order_by = order_by, cls.id
            else:
                order_by = (order_by, )
        assert isinstance(order_by, tuple)

        # join columns in order_by where necessary
        data = {'query': query}
        order_by = cls._substitute_clause(data, order_by)
        query = data['query']

        # we only need foreign key and request columns
        # Note: Primary keys are loaded automatically by sqlalchemy
        fks = [col.name for col in cls.__table__.columns if col.foreign_keys]
        eager_cols = [col for col in to_fetch if "." not in col]
        to_load = [getattr(cls, e) for e in list(set(fks + eager_cols))]
        assert all(hasattr(e, 'type') for e in to_load)
        query = query.options(load_only(*to_load))
        # only return one line per result model so we can use limit and offset
        query = query.distinct(cls.id)
        dense_rank = func.dense_rank().over(  # remember the actual order
            order_by=order_by).label("dense_rank")
        query = query.add_columns(dense_rank)
        query = query.from_self(cls)
        query = query.order_by(dense_rank)

        if limit is not None:
            query = query.limit(limit)
        if offset is not None:
            query = query.offset(offset)

        query = cls._eager_load(to_fetch, query)

        # for query debugging use
        # import sqlalchemy.dialects.postgresql as postgresql
        # print(query.statement.compile(dialect=postgresql.dialect()))
        # print("===========")

        return query, json_to_populate
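
The combination of distinct(cls.id), dense_rank() and from_self() here keeps one row per model instance (PostgreSQL's DISTINCT ON) while still limiting and ordering by the originally requested columns. A hedged usage sketch; the model name, field list and filter below are illustrative assumptions.

# hypothetical usage of the helper above
query, json_fields = Article._ser(
    to_return=["id", "title", "author.name"],    # illustrative field list
    filter_by={"published": True},               # illustrative filter
    limit=25,
    offset=0,
)
rows = query.all()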