Example #1
def paginate(select: Select, page_position: PagePosition) -> Select:
    # Apply the requested sort orders, if any, before paginating.
    if page_position.sort:
        order_by_clauses = [
            _sort_direction_map[order.direction](order.field)
            for order in page_position.sort.orders
        ]
        select = select.order_by(*order_by_clauses)
    # LIMIT/OFFSET apply whether or not a sort was requested.
    return select.limit(page_position.limit).offset(page_position.offset)
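
A minimal way to exercise Example #1. PagePosition, Sort, Order, and _sort_direction_map are not shown in the snippet, so the definitions below are assumptions reconstructed from how paginate uses them:

from dataclasses import dataclass
from typing import List, Optional

from sqlalchemy import asc, desc
from sqlalchemy.sql import column, select, table

# Hypothetical supporting types, inferred from the attribute accesses above.
@dataclass
class Order:
    field: str
    direction: str  # "asc" or "desc"

@dataclass
class Sort:
    orders: List[Order]

@dataclass
class PagePosition:
    sort: Optional[Sort]
    limit: int
    offset: int

# Assumed mapping from direction name to the SQLAlchemy ordering function.
_sort_direction_map = {"asc": asc, "desc": desc}

users = table("users", column("id"), column("name"))
page = PagePosition(sort=Sort(orders=[Order("name", "asc")]), limit=20, offset=40)
print(paginate(select([users]), page))
# SELECT users.id, users.name FROM users ORDER BY name ASC LIMIT :param_1 OFFSET :param_2
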
Example #2
def limit_query(
    query: Select,
    limit: Optional[str] = None,
    offset: Optional[str] = None,
) -> Select:
    # limit/offset arrive as raw strings (e.g. from query parameters):
    # validate before casting so bad input raises a clear error.
    if limit:
        validators.raise_if_not_int(limit)
        query = query.limit(int(limit))
    if offset:
        validators.raise_if_not_int(offset)
        query = query.offset(int(offset))
    return query
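
Example #2 defers validation to a project-specific validators module. A sketch with a stand-in validator (an assumption, not the project's real implementation), showing the intended call pattern with raw query-string values:

from sqlalchemy.sql import column, select, table

class validators:
    """Stand-in for the project's validators module (assumption)."""

    @staticmethod
    def raise_if_not_int(value: str) -> None:
        if not value.isdigit():
            raise ValueError(f"expected an integer string, got {value!r}")

users = table("users", column("id"))
print(limit_query(select([users]), limit="25", offset="50"))
# SELECT users.id FROM users LIMIT :param_1 OFFSET :param_2
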
Example #3
def paginate(
    query: Select,
    columns: Dict[Column, Any],
    order: Order,
    limit: int,
) -> Select:
    orderer = get_orderer(order)
    comparator = get_comparator(order)

    for column, value in columns.items():
        # Every keyset column participates in the sort order.
        query = query.order_by(orderer(column))

        # A non-None value is the cursor position: filter rows past it.
        if value is not None:
            query = query.where(comparator(column, value))

    return query.limit(limit)
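
get_orderer and get_comparator are not shown in Example #3. One plausible pairing, assuming Order is an enum of sort directions (both helpers below are assumptions): an ascending keyset resumes strictly after the cursor value, a descending one strictly before it.

import enum
import operator

from sqlalchemy import asc, desc

class Order(enum.Enum):  # assumed shape of the Order type
    ASC = "asc"
    DESC = "desc"

def get_orderer(order: Order):
    # Sort direction applied to every keyset column.
    return asc if order is Order.ASC else desc

def get_comparator(order: Order):
    # Comparison that resumes strictly past the cursor value.
    return operator.gt if order is Order.ASC else operator.lt

Note that the loop ANDs one comparison per column, which is a simplification; a fully correct multi-column keyset needs the tie-breaking OR logic used in Example #5.
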
Example #4
def _apply_limit(query: Select,
                 args: dict) -> Select:
    """
    If a limit has been supplied by the 'length' parameter (and an offset
    supplied by the 'start' parameter) from a DataTable AJAX request,
    this adds it to the given query. A length of -1 represents no limit or
    offset. The offset index is 0-based.

    :param query: the DataTable SQL query
    :param args: the query parameters from a DataTable AJAX request
    :return: the query with the limit (if it exists) applied
    """
    limit = int(args['length'])
    if limit != -1:
        offset = int(args.get('start', 0))
        query = query.limit(limit).offset(offset)
    return query
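
A usage sketch for Example #4 with the parameters a DataTables AJAX request sends ('start' and 'length' arrive as strings):

from sqlalchemy.sql import column, select, table

records = table("records", column("id"))

# Page 3 at 10 rows per page: start=20, length=10.
print(_apply_limit(select([records]), {"start": "20", "length": "10"}))
# SELECT records.id FROM records LIMIT :param_1 OFFSET :param_2

# length == -1 means "show all rows", so no LIMIT/OFFSET is added.
print(_apply_limit(select([records]), {"length": "-1"}))
# SELECT records.id FROM records
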
Example #5
async def get_paginator(request: Request, query: Select, columns):
    """columns: column, sort order, from col to url, from url to col, url pattern"""
    assert len(columns) > 0

    need_reverse = False

    pattern = '_'.join([column[4] for column in columns])

    if 'after' in request.raw_args and re.match(f"^{pattern}$",
                                                request.raw_args['after']):
        cols_vals = request.raw_args['after'].split('_')
        col, order, to_url, from_url, _ = columns[0]
        order_by = [getattr(col, order)()]

        if len(columns) == 1:
            if order == 'desc':
                condition = col <= from_url(cols_vals[0])
            else:
                condition = col >= from_url(cols_vals[0])

        else:
            col2, order2, to_url2, from_url2, _ = columns[1]
            order_by.append(getattr(col2, order2)())

            if order2 == 'desc':
                condition = (col < from_url(cols_vals[0])) | (
                    (col == from_url(cols_vals[0])) & (col2 <= from_url2(cols_vals[1]))
                )
            else:
                condition = (col > from_url(cols_vals[0])) | (
                    (col == from_url(cols_vals[0])) & (col2 >= from_url2(cols_vals[1]))
                )

        query = query.where(condition).order_by(*order_by)

        limit = PAGE_SIZE + 2  # cursor row + page + one look-ahead row
        slice_from = 1         # skip the cursor row itself

    elif 'before' in request.raw_args and re.match(f"^{pattern}$",
                                                   request.raw_args['before']):
        cols_vals = request.raw_args['before'].split('_')
        col, order, to_url, from_url, _ = columns[0]
        # Reversed sort so LIMIT grabs the rows just before the cursor.
        order_by = [col.asc() if order == 'desc' else col.desc()]

        if len(columns) == 1:
            if order == 'desc':
                condition = col >= from_url(cols_vals[0])
            else:
                condition = col <= from_url(cols_vals[0])
        else:
            col2, order2, to_url2, from_url2, _ = columns[1]
            order_by.append(col2.asc() if order2 == 'desc' else col2.desc())

            if order2 == 'desc':
                condition = (col > from_url(cols_vals[0])) | (
                    (col == from_url(cols_vals[0])) & (col2 >= from_url2(cols_vals[1]))
                )
            else:
                condition = (col < from_url(cols_vals[0])) | (
                    (col == from_url(cols_vals[0])) & (col2 <= from_url2(cols_vals[1]))
                )

        query = query.where(condition).order_by(*order_by)

        limit = PAGE_SIZE + 2  # cursor row + page + one look-ahead row
        slice_from = 1         # skip the cursor row itself
        need_reverse = True    # rows were fetched in reverse order

    else:
        col, order, _, _, _ = columns[0]
        order_by = [getattr(col, order)()]
        if len(columns) > 1:
            col2, order2, _, _, _ = columns[1]
            order_by.append(getattr(col2, order2)())

        query = query.order_by(*order_by)
        limit = PAGE_SIZE + 1  # page + one look-ahead row
        slice_from = 0

    query = query.limit(limit)

    all_items = await query.gino.all()
    items = all_items[slice_from:PAGE_SIZE + slice_from]

    after = before = None
    if need_reverse:
        items = list(reversed(items))
        after = True
        if len(all_items) == limit:
            before = True

    else:
        if slice_from == 1:
            before = True

        if len(all_items) == limit:
            after = True

    if after:
        after = '_'.join(
            [col[2](getattr(items[-1], col[0].name)) for col in columns])
        after = url_without_qs_param(request.url, 'before', {'after': after})

    if before:
        before = '_'.join(
            [col[2](getattr(items[0], col[0].name)) for col in columns])
        before = url_without_qs_param(request.url, 'after', {'before': before})

    return Paginator(items=items, after=after, before=before)
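
A sketch of a two-column cursor definition for Example #5, assuming a Gino model named User with created_at and id columns inside a Sanic handler (the model and the token encodings are assumptions):

from datetime import datetime

columns = [
    (User.created_at, 'desc',
     lambda dt: str(int(dt.timestamp())),        # column value -> URL token
     lambda s: datetime.fromtimestamp(int(s)),   # URL token -> column value
     r'\d+'),                                    # regex fragment for the token
    (User.id, 'asc', str, int, r'\d+'),
]

async def list_users(request):  # assumed Sanic route handler
    paginator = await get_paginator(request, User.query, columns)
    return paginator

Tokens must not contain '_', since the cursor string is joined and split on underscores; the regex fragments above enforce that.
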
Example #6
    @classmethod
    def query(
        cls,
        select_query: Select,
        taxon_model_info_map: Dict[str, TaxonModelInfo],
        projection_taxons: SlugExprTaxonMap,
        data_source: str,
        order_by: Optional[List[TaxonDataOrder]],
        limit: Optional[int],
        offset: Optional[int],
        used_physical_data_sources: Set[str],
        dimension_templates: Optional[List[SqlFormulaTemplate]] = None,
    ) -> Dataframe:
        """
        Generates the final projected dataframe

        :param select_query: Original query fetching all necessary fields
        :param taxon_model_info_map: Map of taxon slug expression to taxon model info
        :param projection_taxons: List of taxons meant to be projected by the final query
        :param data_source: Virtual data source for this subrequest
        :param order_by: List of clauses for order by
        :param limit: Limit for the query
        :param offset: Offset for the query
        :param used_physical_data_sources: Set of physical data sources used by the query
        :param dimension_templates: List of dimension templates

        :return: Final dataframe including all requested taxons
        """
        group_by = []
        selectors = []

        projected_df_columns: Dict[TaxonExpressionStr, DataframeColumn] = {}
        for taxon in projection_taxons.values():
            # apply aggregation, if needed
            agg_type = taxon.tel_metadata_aggregation_type
            if agg_type and agg_type in cls._AGGREGATION_FUNCTIONS_MAP:
                col = cls._AGGREGATION_FUNCTIONS_MAP[agg_type](column(taxon.slug_safe_sql_identifier))
            else:
                col = column(taxon.slug_safe_sql_identifier)

            col = col.label(taxon.slug_safe_sql_identifier)

            # create appropriate dataframe column
            value_quantity_type = ValueQuantityType.scalar
            if not taxon.calculation and taxon.slug_expr in taxon_model_info_map:
                value_quantity_type = taxon_model_info_map[taxon.slug_expr].quantity_type
            df_column_name = TaxonExpressionStr(taxon.slug)
            projected_df_columns[df_column_name] = DataframeColumn(df_column_name, taxon, value_quantity_type)

            # make sure we select this column in the query
            selectors.append(col)

            # check whether this taxon should be in group by clause
            if agg_type in cls._GROUP_BY_AGGREGATION_TYPES:
                group_by.append(col)

        # make sure we select all columns for dimension templates
        for dim_template in dimension_templates or []:
            col = column(dim_template.label)
            selectors.append(col)

            # we should group by all dimension templates
            group_by.append(col)

        # deliberately add this constant value to emulate USING ON FALSE => PROD-8136
        selectors.append(literal(data_source).label(HUSKY_QUERY_DATA_SOURCE_COLUMN_NAME))
        # use literal_column here because some database engines do not allow grouping by a constant
        group_by.append(literal_column(HUSKY_QUERY_DATA_SOURCE_COLUMN_NAME))

        # build the final projected query
        new_query = Select(
            columns=sort_columns(selectors),
            order_by=[nullslast(ORDER_BY_FUNCTIONS[item.type](item.taxon)) for item in (order_by or [])],
            group_by=sort_columns(group_by),
        ).select_from(select_query)

        if limit is not None:
            new_query = new_query.limit(limit)
        if offset is not None:
            new_query = new_query.offset(offset)

        # collect names of all used models
        used_model_names = {
            model_info.model_name for model_info in taxon_model_info_map.values() if model_info.model_name is not None
        }

        return Dataframe(new_query, projected_df_columns, used_model_names, used_physical_data_sources)
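
Stripped of the project-specific taxon machinery, the core pattern in Example #6 is: wrap the inner query as a derived table, project (possibly aggregated) columns by name, group by the dimensions plus the constant data-source label, then apply limit/offset. A minimal SQLAlchemy 1.3-style sketch of that shape (table and column names are illustrative):

from sqlalchemy import func, literal, literal_column
from sqlalchemy.sql import Select, column, select, table

events = table("events", column("region"), column("amount"))
inner = select([events])

# Outer query projects from the inner query as a derived table.
outer = Select(
    columns=[
        column("region").label("region"),
        func.sum(column("amount")).label("amount"),
        literal("my_source").label("data_source"),
    ],
    group_by=[column("region"), literal_column("data_source")],
).select_from(inner)

# Pagination is applied last, only when requested.
outer = outer.limit(100).offset(0)
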