Example #1
    def get_statements(self, model_id, date, offset=0, limit=None,
                       sort_by=None, stmt_types=None, min_belief=None,
                       max_belief=None):
        """Load the statements by model and date.

        Parameters
        ----------
        model_id : str
            The standard name of the model to get statements for.
        date : str
            The date when the model was generated.
        offset : int
            The offset to start at.
        limit : int
            The number of statements to return.
        sort_by : str
            Optional sort key: 'evidence', 'belief', or 'paths'.
        stmt_types : list[str]
            Optional list of statement types to filter on (case-insensitive).
        min_belief : float
            Optional lower bound on statement belief.
        max_belief : float
            Optional upper bound on statement belief.

        Returns
        -------
        list[indra.statements.Statement]
            A list of statements corresponding to the model and date.
        """
        logger.info(f'Got request to get statements for model {model_id} '
                    f'on date {date} with offset {offset} and limit {limit} '
                    f'and sort by {sort_by}')
        with self.get_session() as sess:
            q = sess.query(Statement.statement_json).filter(
                Statement.model_id == model_id,
                Statement.date == date
            )
            if stmt_types:
                stmt_types = [stmt_type.lower() for stmt_type in stmt_types]
                q = q.filter(
                    func.lower(Statement.statement_json[
                        'type'].astext).in_(stmt_types))
            if min_belief is not None:
                q = q.filter(
                    Statement.statement_json['belief'].astext.cast(
                        Float) >= float(min_belief))
            if max_belief is not None:
                q = q.filter(
                    Statement.statement_json['belief'].astext.cast(
                        Float) <= float(max_belief))
            if sort_by == 'evidence':
                q = q.order_by(nullslast(jsonb_array_length(
                    Statement.statement_json["evidence"]).desc()))
            elif sort_by == 'belief':
                q = q.order_by(
                    nullslast(Statement.statement_json['belief'].desc()))
            elif sort_by == 'paths':
                q = q.order_by(Statement.path_count.desc())
            if offset:
                q = q.offset(offset)
            if limit:
                q = q.limit(limit)
            stmts = stmts_from_json([s for s, in q.all()])
            logger.info(f'Got {len(stmts)} statements')
        return stmts
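
The snippet above relies on names it does not show being imported. A minimal sketch of what it assumes (the import paths are standard SQLAlchemy; the helper is a guess at how the project wraps the Postgres function):

from sqlalchemy import Float, func
from sqlalchemy.sql.expression import nullslast


def jsonb_array_length(col):
    # Renders Postgres's jsonb_array_length() for a JSONB expression; used
    # above to sort statements by the size of their evidence list.
    return func.jsonb_array_length(col)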
Example #2
 def ordered_by_rating(limit: int = None,
                       direction: Literal['asc', 'desc'] = 'desc'):
     order = asc if direction == 'asc' else desc  # honor the direction argument
     query = Game.query\
         .join(Rating, Rating.game_id == Game.id, isouter=True)\
         .join(GamePlay, GamePlay.game_id == Game.id, isouter=True)\
         .group_by(Game.id)\
         .order_by(nullslast(order(func.avg(Rating.rating))),
                   nullslast(desc(func.count(GamePlay.id))),
                   asc(Game.created_at))
     if limit is not None:
         query = query.limit(limit)
     return query
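
A hypothetical call site; the function only builds the query, so the caller materializes it:

# Ten top games: best average rating first, ties broken by play count;
# games with no ratings or plays sort last thanks to nullslast().
top_ten = ordered_by_rating(limit=10).all()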
Example #3
def get_events_for_place(place_id: UUID,
                         event_types: List[str] = None,
                         include_null_type: bool = True,
                         limit: int = None,
                         offset: int = None) -> tuple:
    """Get all events that occurred at a place.

    Args:
        place_id: MBID of the place.
        event_types: List of types of events to be fetched. The supported event_types are
        'Concert', 'Festival', 'Convention/Expo', 'Launch event', 'Award ceremony', 'Stage performance', and 'Masterclass/Clinic'.
        include_null_type: Whether to include events with no type.
        limit: Max number of events to return.
        offset: Offset that can be used in conjunction with the limit.

    Returns:
        Tuple containing the list of dictionaries of events and the total count of the events.
        The list of dictionaries of events is ordered by event begin year, begin month, begin day,
        begin time, and event name. In case one of these is set to NULL, it will be ordered last.
    """

    place_id = str(place_id)
    event_types = get_mapped_event_types(event_types or [])

    with mb_session() as db:
        event_query = db.query(models.Event).outerjoin(models.EventType).\
            options(contains_eager(models.Event.type)).\
            join(models.LinkEventPlace, models.Event.id == models.LinkEventPlace.entity0_id).\
            join(models.Place, models.LinkEventPlace.entity1_id == models.Place.id).\
            filter(models.Place.gid == place_id)

        if include_null_type and event_types:
            event_query = event_query.filter(
                or_(models.Event.type == None,
                    models.EventType.name.in_(event_types)))
        elif event_types:
            event_query = event_query.filter(
                models.EventType.name.in_(event_types))

        event_query = event_query.order_by(
            nullslast(models.Event.begin_date_year.desc()),
            nullslast(models.Event.begin_date_month.desc()),
            nullslast(models.Event.begin_date_day.desc()),
            nullslast(models.Event.time.desc()),
            nullslast(models.Event.name.asc()))
        count = event_query.count()
        events = event_query.limit(limit).offset(offset).all()

        return ([serialize_events(event) for event in events], count)
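
A hypothetical invocation (the all-zero MBID is a placeholder, not a real place):

events, total = get_events_for_place(
    place_id=UUID("00000000-0000-0000-0000-000000000000"),  # placeholder MBID
    event_types=["Concert", "Festival"],
    limit=25,
    offset=0,
)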
Example #4
def index():
    new_release = Game.query.order_by(desc(Game.id)).limit(5).all()
    most_popular = Game.query.join(owned_games).group_by(Game.id).order_by(
        desc(func.count(Game.id))).limit(5).all()
    coming_soon = Game.query.filter_by(status="coming soon").limit(5).all()
    top_rated = Game.query.order_by(nullslast(desc(
        Game.rating))).limit(4).all()
    limited_offer = Game.query.filter(
        Game.discount_expirable == True,
        Game.discount_end_date > datetime.now(),
        Game.discount > 0).order_by(Game.discount_end_date).limit(4).all()
    discount = Game.query.filter(Game.discount_expirable == False,
                                 Game.discount > 0).limit(4).all()
    featured_games_setting = WebsiteSetting.query.filter_by(
        setting_group="featured_games").all()
    featured_games = []
    for i in featured_games_setting:
        featured_games.append(Game.query.filter_by(id=i.setting_value).first())
    return render_template("index.html",
                           active_page="index",
                           new_release=new_release,
                           most_popular=most_popular,
                           coming_soon=coming_soon,
                           top_rated=top_rated,
                           limited_offer=limited_offer,
                           discount=discount,
                           featured_games=featured_games)
Example #5
    def query_agps(
            self, page, symbol_like=None, sort_by=None, order=None,
            force_view=False):
        if force_view:
            table = self.agp_view
        elif self.cache_table_exists():
            table = self.cache_table
        else:
            table = self.agp_view

        s = table.select()
        if symbol_like:
            s = s.where(table.c.symbol_name.like(f"%{symbol_like}%"))

        if sort_by is not None:
            if order is None:
                order = "desc"
            sort_by = self._transform_sort_by(sort_by)
            query_order_func = desc if order == "desc" else asc
            s = s.order_by(nullslast(query_order_func(sort_by)))

        if page is not None:
            s = s.limit(self.PAGE_SIZE).offset(self.PAGE_SIZE*(page-1))
        with self.engine.connect() as connection:
            return connection.execute(s).fetchall()
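
A hypothetical call, assuming reporter is an instance of this class and "size" is a key that _transform_sort_by understands:

# First page of rows whose symbol contains "malloc", sorted descending;
# rows with a NULL sort key come after the rest.
rows = reporter.query_agps(page=1, symbol_like="malloc",
                           sort_by="size", order="desc")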
Example #6
 async def list(user_id: str):
     e = Exercise.__table__
     w = Workout.__table__
     query = Select(columns=[*e.c, w.c.date.label('last_workout_date')]) \
         .select_from(e.outerjoin(w)) \
         .where(e.c.user_id == user_id) \
         .where(e.c.is_deleted == false()) \
         .order_by(nullslast(desc(w.c.date)))
     return await db.fetch_all(query)
Example #7
def list_schemas():
    """Return the page which will display the list of schemas."""
    # Order by the schema which validates the most JSON objects.
    hot_schemas = (db.session.query(
        JsonObject.schema_id,
        func.count("*").label("JsonObject_count")).group_by(
            JsonObject.schema_id).subquery())
    schemas = (db.session.query(Schema).outerjoin(
        hot_schemas, (Schema.id == hot_schemas.c.schema_id)).order_by(
            nullslast(desc(hot_schemas.c.JsonObject_count))))
    return render_template("schemas.html", schemas=schemas)
Example #8
def list_organizations():
    """Return the page which will display the list of organizations."""
    # Order by the organization which provides the most JSON objects.
    big_contributors = (db.session.query(
        JsonObject.org_id,
        func.count("*").label("JsonObject_count")).group_by(
            JsonObject.org_id).subquery())
    organizations = (db.session.query(Organization).outerjoin(
        big_contributors,
        (Organization.id == big_contributors.c.org_id)).order_by(
            nullslast(desc(big_contributors.c.JsonObject_count))))
    return render_template("organizations.html", organizations=organizations)
Example #9
def list_organizations():
    """Return the page which will display the list of organizations."""
    # Order by the organization which provides the most JSON objects.
    is_membership_restricted = int(
        request.args.get("is_membership_restricted", 1)) == 1
    big_contributors = (db.session.query(
        JsonObject.org_id,
        func.count("*").label("JsonObject_count")).group_by(
            JsonObject.org_id).subquery())
    organizations = db.session.query(Organization)
    if not is_membership_restricted:
        organizations = organizations.filter(
            Organization.is_membership_restricted == False)  # noqa
    organizations = (organizations.outerjoin(
        big_contributors,
        (Organization.id == big_contributors.c.org_id)).order_by(
            nullslast(desc(big_contributors.c.JsonObject_count))))
    return render_template("organizations.html", organizations=organizations)
Example #10
def get_release_groups_for_label(label_mbid, release_types=None, limit=None, offset=None):
    """Get all release groups linked to a label.

    Args:
        label_mbid (uuid): MBID of the label.
        release_types (list): List of types of release groups to be fetched. The supported release_types are
        'album', 'single', 'ep', 'broadcast', and 'other'.
        limit (int): Max number of release groups to return.
        offset (int): Offset that can be used in conjunction with the limit.

    Returns:
        Tuple containing the list of dictionaries of release groups and the total count of the release groups.
        The list of dictionaries of release groups is ordered by release year, release month,
        release date, and release name. In case one of these is set to NULL, it will be ordered last.
        List also contains release groups with null type if 'Other' is in the list of release types.
    """
    label_mbid = str(label_mbid)
    includes_data = defaultdict(dict)
    if release_types is None:
        release_types = []
    release_types = get_mapped_release_types(release_types)
    include_null_type = "Other" in release_types
    with mb_session() as db:
        release_groups_query = _get_release_groups_for_label_query(db, label_mbid, release_types, include_null_type)
        count = release_groups_query.count()
        release_groups = release_groups_query.order_by(
            nullslast(models.ReleaseGroupMeta.first_release_date_year.desc()),
            nullslast(models.ReleaseGroupMeta.first_release_date_month.desc()),
            nullslast(models.ReleaseGroupMeta.first_release_date_day.desc()),
            nullslast(models.ReleaseGroup.name.asc())
        ).limit(limit).offset(offset).all()

        for release_group in release_groups:
            includes_data[release_group.id]['meta'] = release_group.meta
        release_groups = [serialize_release_groups(release_group, includes_data[release_group.id])
                          for release_group in release_groups]
        return release_groups, count
Example #11
    def project_dataframe(
        cls,
        calc_df: Dataframe,
        return_taxons: Dict[TaxonExpressionStr, Taxon],
        physical_data_sources: Set[str],
        order_by: Optional[List[TaxonDataOrder]] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> Dataframe:
        """
        Applies in this order:
        - filtering
        - ordering
        - limiting and offsetting
        """
        for order_by_rule in order_by or []:
            if order_by_rule.taxon not in return_taxons:
                raise InvalidRequest(
                    'request.order_by',
                    f'Taxon "{order_by_rule.taxon}" used in order_by clause must be also selected.'
                )

        projected_sql_and_df_columns, final_query = cls._project_columns(
            calc_df.query, calc_df, return_taxons)
        final_query = final_query.select_from(calc_df.query)

        projected_df_columns = Dataframe.dataframe_columns_to_map(
            [df_col for _, df_col in projected_sql_and_df_columns])

        if order_by:
            final_query = final_query.order_by(*[
                nullslast(ORDER_BY_FUNCTIONS[item.type](column(
                    safe_identifier(item.taxon)))) for item in order_by
            ])

        if limit is not None:
            final_query = final_query.limit(limit)
        if offset is not None:
            final_query = final_query.offset(offset)

        return Dataframe(
            final_query,
            projected_df_columns,
            calc_df.used_model_names,
            used_physical_data_sources=physical_data_sources,
        )
Example #12
    def _order_by(self, query, joins, sort_joins, sort_field, sort_desc):
        """
            Apply order_by to the query

            :param query:
                Query
            :param joins:
                Current joins
            :param sort_joins:
                Sort joins (properties or tables)
            :param sort_field:
                Sort field
            :param sort_desc:
                Select sort order:
                * True: for descending order
                * False or None: for ascending default order
                * 'LAST': for NULLS LAST
                * 'FIRST': for NULLS FIRST
        """
        if sort_field is not None:
            # Handle joins
            query, joins, alias = self._apply_path_joins(query,
                                                         joins,
                                                         sort_joins,
                                                         inner_join=False)

            column = sort_field if alias is None else getattr(
                alias, sort_field.key)

            if sort_desc is True:
                if isinstance(column, tuple):
                    query = query.order_by(*map(desc, column))
                else:
                    query = query.order_by(desc(column))
            elif sort_desc is False:
                if isinstance(column, tuple):
                    query = query.order_by(*column)
                else:
                    query = query.order_by(column)
            elif sort_desc == 'LAST':
                query = query.order_by(nullslast(desc(column)))
            elif sort_desc == 'FIRST':
                query = query.order_by(nullsfirst(desc(column)))

        return query, joins
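
How the four sort_desc modes map onto the emitted ORDER BY, sketched for a hypothetical view instance and User.age column. Note that both 'LAST' and 'FIRST' force a descending sort; ascending NULL placement is not expressible here:

# sort_desc=True    -> ORDER BY user.age DESC
# sort_desc=False   -> ORDER BY user.age
# sort_desc='LAST'  -> ORDER BY user.age DESC NULLS LAST
# sort_desc='FIRST' -> ORDER BY user.age DESC NULLS FIRST
query, joins = view._order_by(query, joins, sort_joins=None,
                              sort_field=User.age, sort_desc='LAST')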
Example #13
    def get(self):
        """Fetches all (interactive) pipeline runs.

        These pipeline runs are either pending, running or have already
        completed. Runs are ordered by started time descending.
        """

        query = models.InteractivePipelineRun.query

        # Ability to query specific runs given the `pipeline_uuid` or
        # `project_uuid` through the URL (using `request.args`).
        if "pipeline_uuid" in request.args and "project_uuid" in request.args:
            query = query.filter_by(
                pipeline_uuid=request.args.get("pipeline_uuid")
            ).filter_by(project_uuid=request.args.get("project_uuid"))
        elif "project_uuid" in request.args:
            query = query.filter_by(project_uuid=request.args.get("project_uuid"))

        runs = query.order_by(nullslast(models.PipelineRun.started_time.desc())).all()
        return {"runs": [run.__dict__ for run in runs]}, 200
Example #14
def index():
    sorter = request.args.get("sorter", None)
    if sorter is None:
        sorter = "random"
        initial_sorting = True
    else:
        initial_sorting = False
    order = request.args.get("order", None)
    criterion = SORTER.get(sorter, DEFAULT_SORTER)
    if order == DEFAULT_ORDER:
        criterion = desc(criterion)

    projects = Project.query.filter(Project.is_active.is_(True)).order_by(
        nullslast(criterion))
    return {
        "projects": projects,
        "sorter": sorter,
        "initial_sorting": initial_sorting,
        "order": order,
        "DEFAULT_ORDER": DEFAULT_ORDER,
    }
Example #15
def get_release_groups_for_artist(artist_id, release_types=None, limit=None, offset=None):
    """Get all release groups linked to an artist.

    Args:
        artist_id (uuid): MBID of the artist.
        release_types (list): List of types of release groups to be fetched.
        limit (int): Max number of release groups to return.
        offset (int): Offset that can be used in conjunction with the limit.

    Returns:
        Tuple containing the list of dictionaries of release groups ordered by release year
        and the total count of the release groups.
    """
    artist_id = str(artist_id)
    includes_data = defaultdict(dict)
    if release_types is None:
        release_types = []
    release_types = [release_type.lower() for release_type in release_types]
    # map release types to their case sensitive name in musicbrainz.release_group_primary_type table in the database
    release_types_mapping = {
        'album': 'Album',
        'single': 'Single',
        'ep': 'EP',
        'broadcast': 'Broadcast',
        'other': 'Other'
    }
    release_types = [release_types_mapping[release_type] for release_type in release_types]
    with mb_session() as db:
        release_groups_query = _get_release_groups_for_artist_query(db, artist_id, release_types)
        count = release_groups_query.count()
        release_groups = release_groups_query.order_by(
            nullslast(models.ReleaseGroupMeta.first_release_date_year.desc())
        ).limit(limit).offset(offset).all()

        for release_group in release_groups:
            includes_data[release_group.id]['meta'] = release_group.meta
        release_groups = [serialize_release_groups(release_group, includes_data[release_group.id])
                          for release_group in release_groups]
        return release_groups, count
Example #16
def list_reviewer(language_id, sort_key, order):
    """List of reviewers applying filters and sorting"""
    reviewers = (
        Reviewer.query.outerjoin(
            ReviewRequest, Reviewer.id == ReviewRequest.reviewer_id
        )
        .add_columns(
            Reviewer.id.label("id"),
            Reviewer.first_name.label("first_name"),
            Reviewer.last_name.label("last_name"),
            func.count(ReviewRequest.id).label("review_count"),
            func.max(ReviewRequest.review_date).label("last_review"),
        )
        .group_by(Reviewer.id, Reviewer.first_name, Reviewer.last_name)
    )
    if language_id:
        reviewers = reviewers.filter(
            Reviewer.languages.any(ReviewLanguage.id == language_id)
        )
    sorting = {
        "asc": lambda x: nullsfirst(asc(x)),
        "desc": lambda x: nullslast(desc(x)),
    }[order]
    return reviewers.order_by(sorting(sort_key)).all()
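
A hypothetical call; "last_review" refers to the labeled aggregate column, which SQLAlchemy resolves by name:

# Reviewers for one language, most recently active first; reviewers with no
# reviews yet (NULL last_review) land at the end.
rows = list_reviewer(language_id=3, sort_key="last_review", order="desc")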
Example #17
    def _build_query_window_aggregations(
        self,
        taxon_to_model: Dict[TaxonSlugExpression, HuskyModel],
        ordered_query_joins: Sequence[QueryJoins],
    ) -> Select:
        """
        Generates query for taxons which need window functions for aggregation

        :param taxon_to_model: Map of taxon slugs (key) and models they are coming from (value)
        :param ordered_query_joins: List of joins
        """
        selectors = []
        # generate inner query with window aggregation functions
        for taxon_slug_expression, taxon in sorted(
                self.projection_taxons.items(), key=lambda x: str(x[0])):
            model = taxon_to_model[taxon_slug_expression]
            if (taxon.tel_metadata
                    and taxon.tel_metadata.aggregation_definition
                    and taxon.tel_metadata.aggregation_definition.params
                    and taxon.tel_metadata_aggregation_type
                    in self._AGGREGATION_WINDOW_FUNCTIONS):
                # find the order_by columns
                order_by = []
                window_params = cast(
                    AggregationParamsSortDimension,
                    taxon.tel_metadata.aggregation_definition.params)
                for field in window_params.sort_dimensions:
                    col = taxon_to_model[TaxonSlugExpression(
                        field.taxon)].taxon_sql_accessor(
                            self.ctx, field.taxon)

                    order_by_dir = field.order_by or TaxonOrderType.asc
                    order_by.append(
                        nullslast(ORDER_BY_FUNCTIONS[order_by_dir](
                            literal_column(col))))

                # apply window aggregation functions
                column = self._AGGREGATION_WINDOW_FUNCTIONS[
                    taxon.tel_metadata_aggregation_type](literal_column(
                        model.taxon_sql_accessor(self.ctx, taxon.slug))).over(
                            partition_by=self.get_partition_by_columns(model),
                            order_by=order_by)
            else:
                # otherwise, render the columns "as-is"
                column = literal_column(
                    model.taxon_sql_accessor(self.ctx, taxon.slug))

            selectors.append(column.label(taxon.slug_safe_sql_identifier))

        # add joins to the inner query
        inner_query = select(selectors).select_from(
            self._build_from_joins(ordered_query_joins))

        # apply scope filters to the inner query
        inner_query = ScopeGuard.add_scope_row_filters(
            self.ctx, self.scope, inner_query, self.taxon_model_info_map)

        # update taxon model info map, because we're selecting from outer query and not the inner query
        self._rebuild_taxon_info_map_inner_query()

        # then, we prepare the outer query on which we can safely apply GROUP BY
        return self._build_selectors(lambda _, taxon_slug: safe_identifier(
            taxon_slug)).select_from(inner_query)
Example #18
def standings():
    res = Result.query.order_by(nullslast(Result.tot_score.desc()),
                                Result.tot_time).all()
    return render_template('standings.html', results=res)
Example #19
def get_workflows(paginate=None):  # noqa
    r"""Get all workflows.

    ---
    get:
      summary: Returns all workflows.
      description: >-
        This resource is expecting a user UUID. The
        information related to all workflows for a given user will be served
        as JSON
      operationId: get_workflows
      produces:
        - application/json
      parameters:
        - name: user
          in: query
          description: Required. UUID of workflow owner.
          required: true
          type: string
        - name: type
          in: query
          description: Required. Type of workflows.
          required: true
          type: string
        - name: verbose
          in: query
          description: Optional flag to show more information.
          required: false
          type: boolean
        - name: search
          in: query
          description: Filter workflows by name.
          required: false
          type: string
        - name: sort
          in: query
          description: Sort workflows by creation date (asc, desc).
          required: false
          type: string
        - name: status
          in: query
          description: Filter workflows by list of statuses.
          required: false
          type: array
          items:
            type: string
        - name: page
          in: query
          description: Results page number (pagination).
          required: false
          type: integer
        - name: size
          in: query
          description: Number of results per page (pagination).
          required: false
          type: integer
        - name: include_progress
          in: query
          description: Include progress information of the workflows.
          required: false
          type: boolean
        - name: include_workspace_size
          in: query
          description: Include size information of the workspace.
          required: false
          type: boolean
        - name: include_retention_rules
          in: query
          description: Include workspace retention rules of the workflows.
          type: boolean
        - name: workflow_id_or_name
          in: query
          description: Optional analysis UUID or name to filter.
          required: false
          type: string
      responses:
        200:
          description: >-
            Requests succeeded. The response contains the current workflows
            for a given user.
          schema:
            type: object
            properties:
              total:
                type: integer
              items:
                type: array
                items:
                  type: object
                  properties:
                    id:
                      type: string
                    name:
                      type: string
                    status:
                      type: string
                    size:
                      type: object
                      properties:
                        raw:
                          type: number
                        human_readable:
                          type: string
                    user:
                      type: string
                    created:
                      type: string
                    progress:
                      type: object
                    launcher_url:
                      type: string
                      x-nullable: true
          examples:
            application/json:
              [
                {
                  "id": "256b25f4-4cfb-4684-b7a8-73872ef455a1",
                  "name": "mytest.1",
                  "status": "running",
                  "size":{
                    "raw": 10490000,
                    "human_readable": "10 MB"
                  },
                  "user": "******",
                  "created": "2018-06-13T09:47:35.66097",
                  "launcher_url": "https://github.com/reanahub/reana-demo-helloworld.git",
                },
                {
                  "id": "3c9b117c-d40a-49e3-a6de-5f89fcada5a3",
                  "name": "mytest.2",
                  "status": "finished",
                  "size":{
                    "raw": 12580000,
                    "human_readable": "12 MB"
                  },
                  "user": "******",
                  "created": "2018-06-13T09:47:35.66097",
                  "launcher_url": "https://example.org/specs/reana-snakemake.yaml",
                },
                {
                  "id": "72e3ee4f-9cd3-4dc7-906c-24511d9f5ee3",
                  "name": "mytest.3",
                  "status": "created",
                  "size":{
                    "raw": 184320,
                    "human_readable": "180 KB"
                  },
                  "user": "******",
                  "created": "2018-06-13T09:47:35.66097",
                  "launcher_url": "https://zenodo.org/record/1/reana.yaml",
                },
                {
                  "id": "c4c0a1a6-beef-46c7-be04-bf4b3beca5a1",
                  "name": "mytest.4",
                  "status": "created",
                  "size": {
                    "raw": 1074000000,
                    "human_readable": "1 GB"
                  },
                  "user": "******",
                  "created": "2018-06-13T09:47:35.66097",
                  "launcher_url": null,
                }
              ]
        400:
          description: >-
            Request failed. The incoming data specification seems malformed.
        404:
          description: >-
            Request failed. User does not exist.
          examples:
            application/json:
              {
                "message": "User 00000000-0000-0000-0000-000000000000 does not
                            exist"
              }
        500:
          description: >-
            Request failed. Internal controller error.
          examples:
            application/json:
              {
                "message": "Internal workflow controller error."
              }
    """
    try:
        user_uuid = request.args["user"]
        user = User.query.filter(User.id_ == user_uuid).first()
        type_ = request.args.get("type", "batch")
        verbose = json.loads(request.args.get("verbose", "false").lower())
        sort = request.args.get("sort", "desc")
        search = request.args.get("search", "")
        status_list = request.args.get("status", "")
        include_progress = request.args.get("include_progress", verbose)
        include_workspace_size = request.args.get("include_workspace_size", verbose)
        include_retention_rules = request.args.get("include_retention_rules", verbose)
        workflow_id_or_name = request.args.get("workflow_id_or_name")
        if not user:
            return jsonify({"message": "User {} does not exist".format(user_uuid)}), 404
        workflows = []
        query = user.workflows
        if search:
            search = json.loads(search)
            search_val = search.get("name")[0]
            query = query.filter(Workflow.name.ilike("%{}%".format(search_val)))
        if status_list:
            workflow_status = [RunStatus[status] for status in status_list.split(",")]
            query = query.filter(Workflow.status.in_(workflow_status))
        if workflow_id_or_name:
            query = (
                query.filter(Workflow.id_ == workflow_id_or_name)
                if is_uuid_v4(workflow_id_or_name)
                else query.filter(Workflow.name == workflow_id_or_name)
            )
        column_sorted = Workflow.created.desc()
        if sort in ["disk-desc", "cpu-desc"]:
            resource_type = sort.split("-")[0]
            resource = get_default_quota_resource(resource_type)
            query = query.join(
                WorkflowResource,
                and_(
                    Workflow.id_ == WorkflowResource.workflow_id,
                    WorkflowResource.resource_id == resource.id_,
                ),
                isouter=True,
            )
            column_sorted = nullslast(WorkflowResource.quota_used.desc())
        elif sort in ["asc", "desc"]:
            column_sorted = getattr(Workflow.created, sort)()
        pagination_dict = paginate(query.order_by(column_sorted))
        for workflow in pagination_dict["items"]:
            workflow_response = {
                "id": workflow.id_,
                "name": get_workflow_name(workflow),
                "status": workflow.status.name,
                "user": user_uuid,
                "launcher_url": workflow.launcher_url,
                "created": workflow.created.strftime(WORKFLOW_TIME_FORMAT),
                "progress": get_workflow_progress(
                    workflow, include_progress=include_progress
                ),
            }
            if type_ == "interactive" or verbose:
                int_session = workflow.sessions.first()
                if int_session:
                    workflow_response["session_type"] = int_session.type_.name
                    workflow_response["session_uri"] = int_session.path
                    workflow_response["session_status"] = int_session.status.name
                # Skip workflow if type is interactive and there is no session
                elif type_ == "interactive":
                    continue
            empty_disk_usage = {
                "human_readable": "",
                "raw": -1,
            }
            if include_retention_rules:
                rules = workflow.retention_rules.all()
                workflow_response["retention_rules"] = [
                    rule.serialize() for rule in rules
                ]
            if include_workspace_size:
                workflow_response["size"] = (
                    workflow.get_quota_usage()
                    .get("disk", {})
                    .get("usage", empty_disk_usage)
                )
            else:
                workflow_response["size"] = empty_disk_usage
            workflows.append(workflow_response)
        pagination_dict["items"] = workflows
        pagination_dict["user_has_workflows"] = user.workflows.first() is not None
        return jsonify(pagination_dict), 200
    # json.JSONDecodeError subclasses ValueError, so it must be caught first
    # for the more specific message to ever be reached.
    except json.JSONDecodeError:
        return jsonify({"message": "Your request contains invalid JSON."}), 400
    except (ValueError, KeyError):
        return jsonify({"message": "Malformed request."}), 400
    except Exception as e:
        return jsonify({"message": str(e)}), 500
Example #20
    def list(
            session,
            project_id=None,
            org=None,
            limit=100,
            return_kind="objects",
            archived=False,
            date_to=None,  # datetime
            date_from=None,  # datetime
            date_to_string: str = None,
            date_from_string: str = None,
            name: str = None,
            name_match_type: str = "ilike",  # substring match, case-insensitive
            order_by_class_and_attribute=None,
            order_by_direction=desc,
            public_only=False):
        """


        """

        query = session.query(UserScript)

        # Assume we must either have public script or project id
        if public_only is True:
            query = query.filter(UserScript.is_public == True)
        else:
            query = query.filter(UserScript.project_id == project_id)

        if name:
            if name_match_type == "ilike":
                name_search = "%{}%".format(name)
                query = query.filter(UserScript.name.ilike(name_search))
            else:
                query = query.filter(UserScript.name == name)

        if date_from or date_to:
            if date_from:
                query = query.filter(UserScript.created_time >= date_from)
            if date_to:
                query = query.filter(UserScript.created_time <= date_to)
        else:
            query = regular_methods.regular_query(
                query=query,
                date_from_string=date_from_string,
                date_to_string=date_to_string,
                base_class=UserScript,
                created_time_string='time_updated')

        if archived is False:
            query = query.filter(UserScript.archived == False)

        if order_by_class_and_attribute:
            query = query.order_by(
                nullslast(order_by_direction(order_by_class_and_attribute)))

        if return_kind == "count":
            return query.limit(limit).count()

        if return_kind == "objects":
            return query.limit(limit).all()
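
A hypothetical call, assuming list() is exposed as a static method on UserScript and a project object is in scope:

# Twenty most recently created scripts in a project; NULL timestamps sort last.
scripts = UserScript.list(
    session,
    project_id=project.id,
    order_by_class_and_attribute=UserScript.created_time,
    order_by_direction=desc,
    limit=20,
)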
Example #21
def sort(query,
         key,
         model,
         aliases=None,
         join_columns=None,
         clear=False,
         hide_null=False,
         index_column=None,
         nulls_last=False):
    """Sort query using string-formatted columns.

    :param query: Original query
    :param key: Sort column name; prepend with "-" for descending sort
    :param model: SQLAlchemy model
    :param join_columns: Mapping of column names to sort and join rules; used
        for sorting on related columns
    :param clear: Clear existing sort conditions
    :param hide_null: Exclude null values on sorted column(s)
    :param index_column:
    :param nulls_last: Sort null values on sorted column(s) last in results;
        Ignored if hide_null is True
    """

    # Start off assuming we are dealing with a sort column, not a sort
    # expression.
    is_expression = False
    expression_field = None
    expression_type = None
    null_sort = None

    if clear:
        query = query.order_by(False)
    # If the query contains multiple entities (i.e., isn't a simple query on a
    # model), looking up the sort key on the model may lead to invalid queries.
    # In this case, use the string name of the sort key.
    sort_model = (model if len(query._entities) == 1
                  and hasattr(query._entities[0], 'mapper') else None)
    column, order, relationship = parse_option(key,
                                               model=sort_model,
                                               aliases=aliases,
                                               join_columns=join_columns,
                                               query=query)

    # Store the text representation (name) of the sorting column in case we
    # swap it for an expression instead.
    if hasattr(column, 'key'):
        column_name = column.key
    else:
        column_name = column

    if model:
        # Check to see if the model has a sort_expressions attribute on it,
        # which contains a dictionary of column mappings to SQL expressions.
        # If the model has this and there is a matching expression for the
        # column, use the expression instead.
        if hasattr(
                model,
                'sort_expressions') and column_name in model.sort_expressions:
            column = model.sort_expressions[column_name]['expression']
            expression_field = model.sort_expressions[column_name]['field']
            expression_type = model.sort_expressions[column_name]['type']
            null_sort = model.sort_expressions[column_name].get(
                'null_sort', model.sort_expressions[column_name]['expression'])
            is_expression = True

    sort_column = order(column)
    if nulls_last and not hide_null:
        query = query.order_by(sa.nullslast(sort_column))
    else:
        query = query.order_by(sort_column)

    if relationship:
        query = query.join(relationship)
    if hide_null:
        query = query.filter(column != None)  # noqa

    return query, (
        column,
        order,
        column_name,
        is_expression,
        expression_field,
        expression_type,
        null_sort,
    )
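
A hypothetical use, with Candidate and "receipts" standing in as placeholder model and column names: a leading "-" requests a descending sort, and nulls_last=True routes the ordering through sa.nullslast:

query, sort_info = sort(query, "-receipts", model=Candidate, nulls_last=True)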
Example #22
def _nullslast(obj):
    # SQLite (before 3.30) does not support the NULLS LAST modifier, so fall
    # back to the default NULL ordering there.
    if current_app.config['SQLALCHEMY_DATABASE_URI'].startswith("sqlite"):
        return obj
    else:
        return nullslast(obj)
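
A hypothetical use with the Game model from the earlier examples: on Postgres this emits ORDER BY rating DESC NULLS LAST, while on SQLite it degrades to a plain DESC:

games = Game.query.order_by(_nullslast(desc(Game.rating))).all()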
Example #23
    def query(
        cls,
        select_query: Select,
        taxon_model_info_map: Dict[str, TaxonModelInfo],
        projection_taxons: SlugExprTaxonMap,
        data_source: str,
        order_by: Optional[List[TaxonDataOrder]],
        limit: Optional[int],
        offset: Optional[int],
        used_physical_data_sources: Set[str],
        dimension_templates: Optional[List[SqlFormulaTemplate]] = None,
    ) -> Dataframe:
        """
        Generates the final projected dataframe

        :param select_query: Original query fetching all necessary fields
        :param taxon_model_info_map: Map of taxon slug expression to taxon model info
        :param projection_taxons: List of taxons meant to be projected by the final query
        :param data_source: Virtual data source for this subrequest
        :param order_by: List of clauses for order by
        :param limit: Limit for the query
        :param offset: Offset for the query
        :param used_physical_data_sources: Set of physical data sources used by the query
        :param dimension_templates: List of dimension templates

        :return: Final dataframe including all requested taxons
        """
        group_by = []
        selectors = []

        projected_df_columns: Dict[TaxonExpressionStr, DataframeColumn] = {}
        for taxon in projection_taxons.values():
            # apply aggregation, if you need to
            agg_type = taxon.tel_metadata_aggregation_type
            if agg_type and agg_type in cls._AGGREGATION_FUNCTIONS_MAP:
                col = cls._AGGREGATION_FUNCTIONS_MAP[agg_type](column(taxon.slug_safe_sql_identifier))
            else:
                col = column(taxon.slug_safe_sql_identifier)

            col = col.label(taxon.slug_safe_sql_identifier)

            # create appropriate dataframe column
            value_quality_type = ValueQuantityType.scalar
            if not taxon.calculation and taxon.slug_expr in taxon_model_info_map:
                value_quality_type = taxon_model_info_map[taxon.slug_expr].quantity_type
            df_column_name = TaxonExpressionStr(taxon.slug)
            projected_df_columns[df_column_name] = DataframeColumn(df_column_name, taxon, value_quality_type)

            # make sure we select this column in the query
            selectors.append(col)

            # check whether this taxon should be in group by clause
            if agg_type in cls._GROUP_BY_AGGREGATION_TYPES:
                group_by.append(col)

        # make sure we select all columns for dimension templates
        for dim_template in dimension_templates or []:
            col = column(dim_template.label)
            selectors.append(col)

            # we should group by all dimension templates
            group_by.append(col)

        # On purpose adding this value to emulate USING ON FALSE => PROD-8136
        selectors.append(literal(data_source).label(HUSKY_QUERY_DATA_SOURCE_COLUMN_NAME))
        # using literal_column here because some database engines do not like grouping by constant
        group_by.append(literal_column(HUSKY_QUERY_DATA_SOURCE_COLUMN_NAME))

        # create the final query
        new_query = Select(
            columns=sort_columns(selectors),
            order_by=[nullslast(ORDER_BY_FUNCTIONS[item.type](item.taxon)) for item in (order_by or [])],
            group_by=sort_columns(group_by),
        ).select_from(select_query)

        if limit is not None:
            new_query = new_query.limit(limit)
        if offset is not None:
            new_query = new_query.offset(offset)

        # collect names of all used models
        used_model_names = {
            model_info.model_name for model_info in taxon_model_info_map.values() if model_info.model_name is not None
        }

        return Dataframe(new_query, projected_df_columns, used_model_names, used_physical_data_sources)