Example #1
    def fast_query(self, categories, start, radius_m):
        """ Experimental fast sql query using only two CTE queries for all requested POIs
            This approach was tested (EXPLAIN ANALYZE) to be faster than using osm_poi because the osm_poi view uses UNION instead of UNION ALL (needs re-sort)
                WITH my_cte AS ( ... our cte with radius constraint on osm_point and osm_polygon with UNION ALL ) SELECT ... UNION SELECT ... UNION SELECT ...
        """

        from sqlalchemy import select, literal, union_all
        from app import db
        loc = geoalchemy2.shape.from_shape(start, 4326).ST_Transform(Point.SRID)
        lookup = {hash(c.original_id): c for c in categories}

        q_points = select([Point.name, Point.osm_id, Point.way, Point.tags, Point.way.ST_Transform(4326).ST_X().label(
            'lon'), Point.way.ST_Transform(4326).ST_Y().label('lat'), literal(True).label('is_point')]).where(Point.way.ST_DWithin(loc, radius_m))
        q_polygons = select([Polygon.name, Polygon.osm_id, Polygon.way, Polygon.tags, Polygon.way.ST_Transform(4326).ST_Centroid().ST_X().label(
            'lon'), Polygon.way.ST_Transform(4326).ST_Centroid().ST_Y().label('lat'), literal(False).label('is_point')]).where(Polygon.way.ST_DWithin(loc, radius_m))

        cte = union_all(q_points, q_polygons).cte()

        unions = []
        for id_hash, category in lookup.items():
            cond = category._get_condition(cte.c)
            query = select([cte.c.name, cte.c.osm_id, cte.c.lon, cte.c.lat, cte.c.is_point, literal(id_hash).label('c_id')]).where(cond)
            unions.append(query)
        inner_query = union_all(*unions)
        results = db.session.execute(inner_query).fetchall()

        points = []
        for name, osm_id, lon, lat, is_point, cat_id in results:
            points.append(OsmPoi(lookup[cat_id], name, lon, lat, osm_id, is_point))

        return points
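The docstring's performance claim comes down to the difference between the two set operators: UNION removes duplicate rows from the combined result (which typically costs the database an extra sort or hash step), while UNION ALL simply concatenates the branches. A minimal, illustrative sketch, independent of the project's models, showing how SQLAlchemy compiles the two:

from sqlalchemy import Column, Integer, MetaData, Table, select, union, union_all

metadata = MetaData()
t = Table("t", metadata, Column("id", Integer))

s1 = select([t.c.id]).where(t.c.id > 1)
s2 = select([t.c.id]).where(t.c.id < 10)

print(union(s1, s2))      # SELECT ... UNION SELECT ...      (deduplicates, needs sort/hash)
print(union_all(s1, s2))  # SELECT ... UNION ALL SELECT ...  (plain concatenation)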
Example #2
    def get_next_to_translate(self, session):
        """ Use the milestones and priority to find the next description to translate """
        # This is the query we want:
        # select description_id from (
        #         select description_id, 50 as score from languages_tb join description_milestone_tb on (milestone_high=milestone) where language = 'nl'
        # union all
        #         select description_id, 30 from languages_tb join description_milestone_tb on (milestone_medium=milestone) where language = 'nl'
        # union all
        #         select description_id, 10 from languages_tb join description_milestone_tb on (milestone_low=milestone) where language = 'nl'
        # union all
        #         select description_id, prioritize from description_tb
        # ) x
        # where not exists (select 1 from translation_tb where translation_tb.description_id = x.description_id)
        # group by description_id order by sum(score) desc
        # limit 1;
        lang_cte = session.query(Languages).filter_by(language=self.language).cte("language")

        prio = session.query(Description.description_id, Description.prioritize)
        high = session.query(DescriptionMilestone.description_id, literal(50).label("prioritize")).join(lang_cte, lang_cte.c.milestone_high==DescriptionMilestone.milestone)
        medium = session.query(DescriptionMilestone.description_id, literal(30).label("prioritize")).join(lang_cte, lang_cte.c.milestone_medium==DescriptionMilestone.milestone)
        low = session.query(DescriptionMilestone.description_id, literal(10).label("prioritize")).join(lang_cte, lang_cte.c.milestone_low==DescriptionMilestone.milestone)

        prio_cte = union_all(prio, high, medium, low).cte()

        q = session.query(prio_cte.c.description_tb_description_id). \
                    filter(~exists([1], Translation.description_id == prio_cte.c.description_tb_description_id)). \
                    filter(~exists([1], PendingTranslation.description_id == prio_cte.c.description_tb_description_id)). \
                    group_by(prio_cte.c.description_tb_description_id). \
                    order_by(func.sum(prio_cte.c.description_tb_prioritize).desc())

        row = q.first()
        if row:
            return row[0]
Example #3
def _queryNewsVolumes(aggStartDatetime, aggStopDatetime):
  """ Query the database for the counts of security releases+headlines for each
  company that were detected during the specified time window.

  :param aggStartDatetime: inclusive start of aggregation interval as
    UTC datetime
  :param aggStopDatetime: non-inclusive upper bound of aggregation interval as
    UTC datetime
  :returns: a sparse sequence of two-tuples: (symbol, count); companies that
    have no detected news in the given aggregation period will be absent from
    the result.
  """
  headlineSel = sql.select(
    [schema.xigniteSecurityHeadline.c.symbol.label("symbol")]
    ).where(
      (schema.xigniteSecurityHeadline.c.discovered_at >= aggStartDatetime) &
      (schema.xigniteSecurityHeadline.c.discovered_at < aggStopDatetime))

  releaseSel = sql.select(
    [schema.xigniteSecurityRelease.c.symbol]
    ).where(
      (schema.xigniteSecurityRelease.c.discovered_at >= aggStartDatetime) &
      (schema.xigniteSecurityRelease.c.discovered_at < aggStopDatetime))

  allNewsUnion = sql.union_all(headlineSel, releaseSel)

  aggSel = sql.select(
    ["symbol", sql.func.count("symbol").label("sym_count")]
    ).select_from(allNewsUnion.alias("union_of_tables")
    ).group_by("symbol")

  return collectorsdb.engineFactory().execute(aggSel).fetchall()
Example #4
def _queryNewsVolumes(aggStartDatetime, aggStopDatetime):
    """ Query the database for the counts of security releases+headlines for each
  company that were detected during the specified time window.

  :param aggStartDatetime: inclusive start of aggregation interval as
    UTC datetime
  :param aggStopDatetime: non-inclusive upper bound of aggregation interval as
    UTC datetime
  :returns: a sparse sequence of two-tuples: (symbol, count); companies that
    have no detected news in the given aggregation period will be absent from
    the result.
  """
    headlineSel = sql.select([
        schema.xigniteSecurityHeadline.c.symbol.label("symbol")
    ]).where(
        (schema.xigniteSecurityHeadline.c.discovered_at >= aggStartDatetime)
        & (schema.xigniteSecurityHeadline.c.discovered_at < aggStopDatetime))

    releaseSel = sql.select([schema.xigniteSecurityRelease.c.symbol]).where(
        (schema.xigniteSecurityRelease.c.discovered_at >= aggStartDatetime)
        & (schema.xigniteSecurityRelease.c.discovered_at < aggStopDatetime))

    allNewsUnion = sql.union_all(headlineSel, releaseSel)

    aggSel = sql.select([
        "symbol", sql.func.count("symbol").label("sym_count")
    ]).select_from(allNewsUnion.alias("union_of_tables")).group_by("symbol")

    return collectorsdb.engineFactory().execute(aggSel).fetchall()
Example #5
    def get_scheduled_package(self):
        incomplete_builds = self.db.query(Build.package_id)\
                                .filter(Build.state == Build.RUNNING)
        queries = self.get_priority_queries().values()
        union_query = union_all(*queries).alias('un')
        pkg_id = union_query.c.pkg_id
        current_priority = cast(func.sum(union_query.c.priority),
                                Integer).label('curr_priority')
        priorities = self.db.query(pkg_id, current_priority)\
                            .group_by(pkg_id).subquery()
        prioritized = self.db.query(Package.id, priorities.c.curr_priority)\
                             .join(priorities,
                                   Package.id == priorities.c.pkg_id)\
                             .filter((Package.resolved == True) |
                                     (Package.resolved == None))\
                             .filter(Package.id.notin_(
                                 incomplete_builds.subquery()))\
                             .filter(Package.ignored == False)\
                             .order_by(priorities.c.curr_priority.desc())\
                             .all()

        if not prioritized or incomplete_builds.count() >= self.max_builds:
            return None

        self.db.rollback()
        self.lock_package_table()
        # pylint: disable=E1101
        self.db.execute(
            Package.__table__.update().values(current_priority=case(
                prioritized, value=Package.id, else_=null())))
Example #6
    def test_distinct(self):
        mapper(Item, items)
        mapper(Order,
               orders,
               properties={
                   'items': relation(Item, secondary=order_items, lazy=True)
               })
        mapper(User,
               users,
               properties={
                   'addresses': relation(mapper(Address, addresses),
                                         lazy=True),
                   'orders': relation(Order, lazy=True)
               })

        sess = create_session()
        q = sess.query(User)

        # use a union all to get a lot of rows to join against
        u2 = users.alias('u2')
        s = sa.union_all(u2.select(use_labels=True),
                         u2.select(use_labels=True),
                         u2.select(use_labels=True)).alias('u')
        print([key for key in s.c.keys()])
        l = q.filter(s.c.u2_id == User.id).distinct().order_by(User.id).all()
        eq_(self.static.user_all_result, l)
Example #7
def get_queues():
    queue_names = db.session.query(DataSource.queue_name).distinct()
    scheduled_queue_names = db.session.query(
        DataSource.scheduled_queue_name).distinct()
    query = db.session.execute(union_all(queue_names, scheduled_queue_names))

    return ['celery'] + [row[0] for row in query]
Example #8
  def _similar_obj_assessment(cls, type_, id_):
    """Find similar Assessments for object.

    Args:
        type_: Object type.
        id_: Object id.

    Returns:
        SQLAlchemy query that yields results [(similar_id,)] - the id of
        similar objects.
    """
    from ggrc.models import all_models
    # Find objects directly mapped to Snapshot of base object
    # Object1 <-> Snapshot of Object1 <-> Object2
    similar_queries = cls.mapped_to_obj_snapshot(cls.__name__, id_)

    # Find objects mapped to Snapshot of base object through another object
    # Object1 <-> Object2 <-> Snapshot of Object2 <-> Object3
    mapped_obj = cls.mapped_objs(cls.__name__, id_, True)
    similar_queries += cls.mapped_to_obj_snapshot(
        mapped_obj.c.obj_type, mapped_obj.c.obj_id
    )
    similar_objs = sa.union_all(*similar_queries).alias("similar_objs")
    return db.session.query(similar_objs.c.similar_id).join(
        all_models.Assessment,
        sa.and_(
            all_models.Assessment.assessment_type == cls.__name__,
            all_models.Assessment.id == similar_objs.c.similar_id,
        )
    ).filter(
        similar_objs.c.similar_type == type_,
    )
Example #9
    def test_distinct(self):
        users, items, order_items, orders, Item, User, Address, Order, addresses = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.tables.orders,
            self.classes.Item,
            self.classes.User,
            self.classes.Address,
            self.classes.Order,
            self.tables.addresses,
        )

        mapper(Item, items)
        mapper(Order, orders, properties={"items": relationship(Item, secondary=order_items, lazy="select")})
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(mapper(Address, addresses), lazy="select"),
                "orders": relationship(Order, lazy="select"),
            },
        )

        sess = create_session()
        q = sess.query(User)

        # use a union all to get a lot of rows to join against
        u2 = users.alias("u2")
        s = sa.union_all(u2.select(use_labels=True), u2.select(use_labels=True), u2.select(use_labels=True)).alias("u")
        l = q.filter(s.c.u2_id == User.id).order_by(User.id).distinct().all()
        eq_(self.static.user_all_result, l)
Example #10
def filter_project_project_id(q, value, info):
    """This is a special case; because project does not have a stored
    ``project_id``, we have to parse the id walk to program check the
    program.name

    """

    project_ids = value
    if not isinstance(project_ids, (list, tuple)):
        project_ids = [project_ids]

    subqs = []
    for project_id in project_ids:
        split = project_id.split('-', 1)
        if len(split) == 2:
            program_name, project_code = split
            subq = q.props(code=project_code)\
                    .path('programs')\
                    .props(name=program_name)
            subqs.append(subq)
    if not subqs:
        q = q.filter(sa.sql.false())
    else:
        q = capp.db.nodes(q.entity()).select_entity_from(
            sa.union_all(*[sq.subquery().select() for sq in subqs]))

    return q
Example #11
    def _similar_asmnt_issue(cls, type_, id_):
        """Find similar Issues for Assessment.

    Args:
        type_: Assessment type.
        id_: Assessment id.

    Returns:
        SQLAlchemy query that yields results [(similar_id,)] - the id of
        similar objects.
    """
        mapped_obj = cls.mapped_to_assessment([id_]).subquery()
        similar_queries = cls.mapped_to_obj_snapshot(mapped_obj.c.obj_type,
                                                     mapped_obj.c.obj_id)
        mapped_related = cls.mapped_objs(mapped_obj.c.obj_type,
                                         mapped_obj.c.obj_id)
        similar_queries.append(
            db.session.query(
                mapped_related.c.obj_id.label("similar_id"),
                mapped_related.c.obj_type.label("similar_type"),
                mapped_related.c.base_type.label("related_type"),
            ))
        similar_objs = sa.union_all(*similar_queries).alias("scoped_similar")
        return db.session.query(similar_objs.c.similar_id).filter(
            similar_objs.c.similar_type == type_, )
Example #12
    def build_query(self, qualified_bundles):
        """Create a query from the qualified bundles."""
        stmts = []
        for tup in qualified_bundles:
            (
                transfer_id,
                date,
                delta,
                movement_ids,
            ) = tup
            if not stmts:
                # Apply column types and labels to the first row.
                stmts.append(
                    select([
                        cast(literal(transfer_id),
                             String).label('transfer_id'),
                        cast(literal(date), Date).label('date'),
                        cast(literal(delta), Numeric).label('delta'),
                        array(movement_ids,
                              type_=BigInteger).label('movement_ids'),
                    ]))
            else:
                # The column types and labels in the remaining rows are
                # inferred from the first row.
                stmts.append(
                    select([
                        literal(transfer_id),
                        literal(date),
                        literal(delta),
                        array(movement_ids),
                    ]))

        query = union_all(*stmts)
        return query
Example #13
def update():
    g.user.info = json.loads(g.user.info)

    # get oldest and latest status in database
    query_latest = db.session.query(WeiboList).filter(WeiboList.user_uid == g.user.uid)\
        .order_by(WeiboList.created_at.desc()).limit(1).subquery().select()
    query_oldest = db.session.query(WeiboList).filter(WeiboList.user_uid == g.user.uid)\
        .order_by(WeiboList.created_at).limit(1).subquery().select()
    query = db.session.query(WeiboList).select_from(union_all(query_latest,query_oldest)).order_by(WeiboList.created_at)
    records = query.all()
    oldest_datetime = records[0].created_at
    latest_datetime = records[1].created_at
    latest_uid = records[1].uid

    # get total count in database
    total_count_in_database = db.session.query(func.count(WeiboList)) \
        .filter(WeiboList.user_uid == g.user.uid).first()[0]

    # get total count of update status
    token = json.loads(g.user.token)
    client.set_access_token(token['access_token'], token['expires'])
    statuses = client.statuses.user_timeline.get(count=10, page=1, since_id=latest_uid)
    total_count = statuses['total_number']

    page_info = {
        "oldest_date": oldest_datetime.date().isoformat(),
        "latest_date": latest_datetime.date().isoformat(),
        "total_count_in_database": total_count_in_database,
        "total_count_for_update": total_count - total_count_in_database
    }

    return render_template('update.html', user=g.user, current_navi="update", page_info=page_info)
Example #14
    def test_distinct(self):
        users, items, order_items, orders, Item, User, Address, Order, addresses = (
            self.tables.users, self.tables.items, self.tables.order_items,
            self.tables.orders, self.classes.Item, self.classes.User,
            self.classes.Address, self.classes.Order, self.tables.addresses)

        mapper(Item, items)
        mapper(Order,
               orders,
               properties={
                   'items':
                   relationship(Item, secondary=order_items, lazy='select')
               })
        mapper(User,
               users,
               properties={
                   'addresses':
                   relationship(mapper(Address, addresses), lazy='select'),
                   'orders':
                   relationship(Order, lazy='select')
               })

        sess = create_session()
        q = sess.query(User)

        # use a union all to get a lot of rows to join against
        u2 = users.alias('u2')
        s = sa.union_all(u2.select(use_labels=True),
                         u2.select(use_labels=True),
                         u2.select(use_labels=True)).alias('u')
        l = q.filter(s.c.u2_id == User.id).order_by(User.id).distinct().all()
        eq_(self.static.user_all_result, l)
Example #15
    def _similar_obj_assessment(cls, type_, id_):
        """Find similar Assessments for object.

    Args:
        type_: Object type.
        id_: Object id.

    Returns:
        SQLAlchemy query that yields results [(similar_id,)] - the id of
        similar objects.
    """
        from ggrc.models import all_models
        # Find objects directly mapped to Snapshot of base object
        # Object1 <-> Snapshot of Object1 <-> Object2
        similar_queries = cls.mapped_to_obj_snapshot(cls.__name__, id_)

        # Find objects mapped to Snapshot of base object through another object
        # Object1 <-> Object2 <-> Snapshot of Object2 <-> Object3
        mapped_obj = cls.mapped_objs(cls.__name__, id_, True)
        similar_queries += cls.mapped_to_obj_snapshot(mapped_obj.c.obj_type,
                                                      mapped_obj.c.obj_id)
        similar_objs = sa.union_all(*similar_queries).alias("similar_objs")
        return db.session.query(similar_objs.c.similar_id).join(
            all_models.Assessment,
            sa.and_(
                all_models.Assessment.assessment_type == cls.__name__,
                all_models.Assessment.id == similar_objs.c.similar_id,
            )).filter(similar_objs.c.similar_type == type_, )
Example #16
    def main(self):
        incomplete_builds = self.db.query(Build.package_id)\
                                .filter(Build.state == Build.RUNNING)
        if incomplete_builds.count() >= self.max_builds:
            return
        queries = self.get_priority_queries().values()
        union_query = union_all(*queries).alias('un')
        pkg_id = union_query.c.pkg_id
        current_priority = func.sum(union_query.c.priority)\
                               .label('curr_priority')
        candidates = self.db.query(pkg_id, current_priority)\
                            .having(current_priority >=
                                    self.priority_threshold)\
                            .group_by(pkg_id).subquery()
        to_schedule = self.db.query(Package, candidates.c.curr_priority)\
                             .join(candidates,
                                   Package.id == candidates.c.pkg_id)\
                             .filter((Package.resolved == True) |
                                     (Package.resolved == None))\
                             .filter(Package.id.notin_(
                                 incomplete_builds.subquery()))\
                             .filter(Package.ignored == False)\
                             .order_by(candidates.c.curr_priority.desc())\
                             .first()

        if to_schedule:
            if util.get_koji_load(self.koji_session) > self.load_threshold:
                return
            package, priority = to_schedule
            self.log.info('Scheduling build for {}, priority {}'
                          .format(package.name, priority))
            self.backend.submit_build(package)
            self.db.commit()
Example #17
    def test_distinct(self):
        users, items, order_items, orders, \
            Item, User, Address, Order, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.tables.orders,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.classes.Order,
                self.tables.addresses)

        mapper(Item, items)
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items, lazy='select')
        })
        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses), lazy='select'),
            'orders': relationship(Order, lazy='select')
        })

        sess = create_session()
        q = sess.query(User)

        # use a union all to get a lot of rows to join against
        u2 = users.alias('u2')
        s = sa.union_all(
            u2.select(use_labels=True),
            u2.select(use_labels=True), u2.select(use_labels=True)).alias('u')
        result = q.filter(s.c.u2_id == User.id).order_by(User.id).distinct() \
            .all()
        eq_(self.static.user_all_result, result)
Example #18
  def _similar_asmnt_issue(cls, type_, id_):
    """Find similar Issues for Assessment.

    Args:
        type_: Assessment type.
        id_: Assessment id.

    Returns:
        SQLAlchemy query that yields results [(similar_id,)] - the id of
        similar objects.
    """
    mapped_obj = cls.mapped_to_assessment([id_]).subquery()
    similar_queries = cls.mapped_to_obj_snapshot(
        mapped_obj.c.obj_type, mapped_obj.c.obj_id
    )
    mapped_related = cls.mapped_objs(
        mapped_obj.c.obj_type, mapped_obj.c.obj_id
    )
    similar_queries.append(
        db.session.query(
            mapped_related.c.obj_id.label("similar_id"),
            mapped_related.c.obj_type.label("similar_type"),
            mapped_related.c.base_type.label("related_type"),
        )
    )
    similar_objs = sa.union_all(*similar_queries).alias("scoped_similar")
    return db.session.query(similar_objs.c.similar_id).filter(
        similar_objs.c.similar_type == type_,
    )
Example #19
    def test_union_all_limit_offset(self):
        emp, dep, conn = self._emp_d_fixture()

        s1 = select([emp.c.name])
        s2 = select([dep.c.name])
        u1 = union_all(s1, s2).order_by(emp.c.name).limit(3).offset(2)
        r = self._exec_stmt(conn, u1)
        eq_(r.fetchall(), [('Sales', ), ('ed', ), ('jack', )])
Example #20
    def union(self, tables):
        from sqlalchemy import union_all

        if isinstance(tables, list):
            datas = [self.data] + [t.data for t in tables]
        else:
            datas = [self.data, tables.data]
        return Table(self.ctx, gen_table_name(), union_all(*datas))#, self.data.c)
Example #21
    def test_distinct(self):
        (
            users,
            items,
            order_items,
            orders,
            Item,
            User,
            Address,
            Order,
            addresses,
        ) = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.tables.orders,
            self.classes.Item,
            self.classes.User,
            self.classes.Address,
            self.classes.Order,
            self.tables.addresses,
        )

        mapper(Item, items)
        mapper(
            Order,
            orders,
            properties={
                "items": relationship(
                    Item, secondary=order_items, lazy="select"
                )
            },
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    mapper(Address, addresses), lazy="select"
                ),
                "orders": relationship(Order, lazy="select"),
            },
        )

        sess = create_session()
        q = sess.query(User)

        # use a union all to get a lot of rows to join against
        u2 = users.alias("u2")
        s = sa.union_all(
            u2.select(use_labels=True),
            u2.select(use_labels=True),
            u2.select(use_labels=True),
        ).alias("u")
        result = (
            q.filter(s.c.u2_id == User.id).order_by(User.id).distinct().all()
        )
        eq_(self.static.user_all_result, result)
Example #22
    def get(self):
        query = self.get_argument("query", "")
        offset = int(self.get_argument("offset", 0))
        limit = int(self.get_argument("limit", 100))
        if limit > 9000:
            limit = 9000

        groups = (self.session.query(
            label("type", literal("Group")),
            label("id", Group.id),
            label("name", Group.groupname),
        ).filter(Group.enabled == True,
                 Group.groupname.like("%{}%".format(query))).subquery())

        permission_query = query.split("=")[0] if "=" in query else query
        permissions = (self.session.query(
            label("type", literal("Permission")),
            label("id", Permission.id),
            label("name", Permission.name),
        ).filter(Permission.enabled == True,
                 Permission.name.like(
                     "%{}%".format(permission_query))).subquery())

        users = (self.session.query(label("type", literal("User")),
                                    label("id", User.id),
                                    label("name", User.username)).filter(
                                        User.enabled == True,
                                        User.username.like(
                                            "%{}%".format(query))).subquery())

        results_query = self.session.query("type", "id",
                                           "name").select_entity_from(
                                               union_all(
                                                   users.select(),
                                                   permissions.select(),
                                                   groups.select()))
        total = results_query.count()
        results = results_query.offset(offset).limit(limit).all()

        if len(results) == 1:
            result = results[0]

            encoded_params = ""
            if result.type.lower() == "permission" and "=" in query:
                encoded_params = "?" + urlencode(
                    {"argument": query.split("=", 1)[1]})

            base_url = "/{}s/{}".format(result.type.lower(), result.name)
            return self.redirect(base_url + encoded_params)

        self.render(
            "search.html",
            results=results,
            search_query=query,
            offset=offset,
            limit=limit,
            total=total,
        )
Example #23
  def mapped_to_assessment(cls, related_ids):
    """Collect objects that have snapshot mapped to assessment.

    Args:
        related_ids: List of Assessment ids.

    Returns:
        SQLAlchemy query with id and type of found
        objects [(obj_id, obj_type)].
    """
    from ggrc.models import all_models
    asmnt = all_models.Assessment

    objects_mapped = sa.union_all(
        db.session.query(
            Snapshot.child_id.label("obj_id"),
            asmnt.assessment_type.label("obj_type"),
        ).join(
            Relationship,
            sa.and_(
                Relationship.source_id == Snapshot.id,
                Relationship.source_type == Snapshot.__name__,
            )
        ).join(
            asmnt,
            sa.and_(
                Relationship.destination_type == asmnt.__name__,
                Relationship.destination_id == asmnt.id,
            )
        ).filter(
            asmnt.id.in_(related_ids),
            Snapshot.child_type == asmnt.assessment_type,
        ),
        db.session.query(
            Snapshot.child_id.label("obj_id"),
            asmnt.assessment_type.label("obj_type"),
        ).join(
            Relationship,
            sa.and_(
                Relationship.destination_id == Snapshot.id,
                Relationship.destination_type == Snapshot.__name__,
            )
        ).join(
            asmnt,
            sa.and_(
                Relationship.source_type == asmnt.__name__,
                Relationship.source_id == asmnt.id,
            )
        ).filter(
            asmnt.id.in_(related_ids),
            Snapshot.child_type == asmnt.assessment_type,
        )
    ).alias("objects_mapped")
    return db.session.query(
        objects_mapped.c.obj_id.label("obj_id"),
        objects_mapped.c.obj_type.label("obj_type")
    )
Example #24
    def test_union_all_heterogeneous_types(self):
        emp, dep, conn = self._emp_d_fixture()

        s1 = select([emp.c.name, emp.c.fullname]).where(emp.c.name == 'jack')
        s2 = select([dep.c.dep_id, dep.c.name])
        u1 = union_all(s1, s2)
        r = self._exec_stmt(conn, u1)
        eq_(r.fetchall(), [('jack', 'Jack Smith'), (1, 'Engineering'),
                           (2, 'Accounting'), (3, 'Sales')])
Example #25
def _activities_union_all(*qlist):
    '''
    Return union of two or more queries sorted by timestamp,
    and remove duplicates
    '''
    import ckan.model as model
    return model.Session.query(model.Activity).select_entity_from(
        union_all(*[q.subquery().select()
                    for q in qlist])).distinct(model.Activity.timestamp)
Example #26
    async def test_unique_all(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                union_all(select(users),
                          select(users)).order_by(users.c.user_id))

            all_ = await result.unique().all()
            eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
Example #27
def _activities_union_all(*qlist: QActivity) -> QActivity:
    """
    Return union of two or more activity queries sorted by timestamp,
    and remove duplicates
    """
    q: QActivity = (model.Session.query(Activity).select_entity_from(
        union_all(*[q.subquery().select()
                    for q in qlist])).distinct(Activity.timestamp))
    return q
Example #28
    def test_union_all(self):
        e = union_all(select([t1.c.col3]), union(select([t1.c.col3]), select([t1.c.col3])))

        wanted = [("aaa",), ("aaa",), ("bbb",), ("bbb",), ("ccc",), ("ccc",)]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(e.alias("foo").select().execute())
        eq_(found2, wanted)
Example #29
  def mapped_to_assessment(cls, related_ids):
    """Collect objects that have snapshot mapped to assessment.

    Args:
        related_ids: List of Assessment ids.

    Returns:
        SQLAlchemy query with id and type of found
        objects [(obj_id, obj_type)].
    """
    from ggrc.models import all_models
    asmnt = all_models.Assessment

    objects_mapped = sa.union_all(
        db.session.query(
            Snapshot.child_id.label("obj_id"),
            asmnt.assessment_type.label("obj_type"),
        ).join(
            Relationship,
            sa.and_(
                Relationship.source_id == Snapshot.id,
                Relationship.source_type == Snapshot.__name__,
            )
        ).join(
            asmnt,
            sa.and_(
                Relationship.destination_type == asmnt.__name__,
                Relationship.destination_id == asmnt.id,
            )
        ).filter(
            asmnt.id.in_(related_ids),
            Snapshot.child_type == asmnt.assessment_type,
        ),
        db.session.query(
            Snapshot.child_id.label("obj_id"),
            asmnt.assessment_type.label("obj_type"),
        ).join(
            Relationship,
            sa.and_(
                Relationship.destination_id == Snapshot.id,
                Relationship.destination_type == Snapshot.__name__,
            )
        ).join(
            asmnt,
            sa.and_(
                Relationship.source_type == asmnt.__name__,
                Relationship.source_id == asmnt.id,
            )
        ).filter(
            asmnt.id.in_(related_ids),
            Snapshot.child_type == asmnt.assessment_type,
        )
    ).alias("objects_mapped")
    return db.session.query(
        objects_mapped.c.obj_id.label("obj_id"),
        objects_mapped.c.obj_type.label("obj_type")
    )
Example #30
    def test_union_all_heterogeneous_columns(self):
        emp, dep, conn = self._emp_d_fixture()

        s1 = select([emp.c.name])
        s2 = select([dep.c.name])
        u1 = union_all(s1, s2)
        r = self._exec_stmt(conn, u1)
        eq_(r.fetchall(), [('ed', ), ('wendy', ), ('jack', ),
                           ('Engineering', ), ('Accounting', ), ('Sales', )])
Example #31
def _activities_union_all(*qlist):
    '''
    Return union of two or more activity queries sorted by timestamp,
    and remove duplicates
    '''
    import ckan.model as model
    return model.Session.query(model.Activity).select_entity_from(
        union_all(*[q.subquery().select() for q in qlist])
        ).distinct(model.Activity.timestamp)
Example #32
    def test_union_all_homogeneous(self):
        emp, dep, conn = self._emp_d_fixture()

        s1 = select([emp.c.name]).where(emp.c.name == "wendy")
        s2 = select([emp.c.name
                     ]).where(or_(emp.c.name == 'wendy', emp.c.name == 'jack'))
        u1 = union_all(s1, s2)
        r = self._exec_stmt(conn, u1)
        eq_(r.fetchall(), [('wendy', ), ('wendy', ), ('jack', )])
Example #33
    def test_without_default_polymorphic_buildit_newstyle(
        self, two_pjoin_fixture, use_star
    ):
        """how would we do these concrete polymorphic queries using 2.0 style,
        and not any old and esoteric features like "polymorphic_union" ?

        """
        (
            session,
            Employee,
            Engineer,
            Manager,
            Hacker,
            pjoin,
            pjoin2,
            jdoe,
            sally,
            jenn,
            hacker,
        ) = two_pjoin_fixture

        # make a union using the entities as given and wpoly from it.
        # a UNION is a UNION.  there is no way around having to write
        # out filler columns.  concrete inh is really not a good choice
        # when you need to select heterogeneously
        stmt = union_all(
            select(
                literal("engineer").label("type"),
                Engineer,
                null().label("nickname"),
            ),
            select(literal("hacker").label("type"), Hacker),
        ).subquery()

        # issue: if we make this with_polymorphic(Engineer, [Hacker], ...),
        # it blows up and tries to add the "engineer" table for unknown reasons

        if use_star:
            wp = with_polymorphic(
                Engineer, "*", stmt, polymorphic_on=stmt.c.type
            )
        else:
            wp = with_polymorphic(
                Engineer, [Engineer, Hacker], stmt, polymorphic_on=stmt.c.type
            )

        result = session.execute(select(wp)).scalars()

        eq_(
            sorted(repr(obj) for obj in result),
            [
                "Engineer Jenn knows how to program",
                "Hacker Karina 'Badass' knows how to hack",
            ],
        )
Example #34
    def my_members(self):
        """Returns a dictionary from ("User"|"Group", "name") tuples to records."""

        parent = aliased(Group)
        group_member = aliased(Group)
        user_member = aliased(User)

        now = datetime.utcnow()

        users = (self.session.query(
            label("id", user_member.id),
            label("type", literal("User")),
            label("name", user_member.username),
            label("role", GroupEdge._role),
            label("edge_id", GroupEdge.id),
            label("expiration", GroupEdge.expiration),
        ).filter(
            parent.id == self.id,
            parent.id == GroupEdge.group_id,
            user_member.id == GroupEdge.member_pk,
            GroupEdge.active == True,
            parent.enabled == True,
            user_member.enabled == True,
            or_(GroupEdge.expiration > now, GroupEdge.expiration == None),
            GroupEdge.member_type == 0,
        ).group_by("type", "name").subquery())

        groups = (self.session.query(
            label("id", group_member.id),
            label("type", literal("Group")),
            label("name", group_member.groupname),
            label("role", GroupEdge._role),
            label("edge_id", GroupEdge.id),
            label("expiration", GroupEdge.expiration),
        ).filter(
            parent.id == self.id,
            parent.id == GroupEdge.group_id,
            group_member.id == GroupEdge.member_pk,
            GroupEdge.active == True,
            parent.enabled == True,
            group_member.enabled == True,
            or_(GroupEdge.expiration > now, GroupEdge.expiration == None),
            GroupEdge.member_type == 1,
        ).subquery())

        query = (self.session.query("id", "type", "name", "role", "edge_id",
                                    "expiration").select_entity_from(
                                        union_all(users.select(),
                                                  groups.select())).order_by(
                                                      desc("role"),
                                                      desc("type")))

        return OrderedDict(
            ((record.type, record.name), record) for record in query.all())
Example #35
def recursive_search(root_inodes, gid, not_gid, uid, not_uid, mtime, size):
    """
    Perform a recursive search of all files under `root_inodes` with the given constraints
    """
    roots = sa.union_all(*[
        sa.select([
            sa.literal(r[0]).label("device"),
            sa.literal(r[1]).label("inode")
        ]) for r in root_inodes
    ]).alias("roots")

    inode = m.Inode.__table__.alias("inode")
    cte = (sa.select([
        *inode.c,
        sa.func.dusql_path_func(inode.c.parent_inode, inode.c.device,
                                inode.c.basename).label("path"),
    ]).select_from(
        inode.join(
            roots,
            sa.and_(inode.c.inode == roots.c.inode,
                    inode.c.device == roots.c.device),
        ))).cte(recursive=True)

    parent = cte.alias("parent")

    child_paths = cte.union_all(
        sa.select([
            *inode.c, (parent.c.path + "/" + inode.c.basename).label("path")
        ]).where(inode.c.parent_inode == parent.c.inode).where(
            inode.c.device == parent.c.device)).alias("find")

    q = sa.select(child_paths.c)

    if gid is not None:
        q = q.where(child_paths.c.gid == gid)
    if not_gid is not None:
        q = q.where(child_paths.c.gid != not_gid)
    if uid is not None:
        q = q.where(child_paths.c.uid == uid)
    if not_uid is not None:
        q = q.where(child_paths.c.uid != not_uid)
    if mtime is not None:
        if mtime < 0:
            q = q.where(child_paths.c.mtime <= -mtime)
        else:
            q = q.where(child_paths.c.mtime >= mtime)
    if size is not None:
        if size < 0:
            q = q.where(child_paths.c.size <= -size)
        else:
            q = q.where(child_paths.c.size >= size)

    return q
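recursive_search() above follows SQLAlchemy's standard Core recipe for a recursive CTE: build the non-recursive anchor with .cte(recursive=True), alias it, and extend it with cte.union_all(...) that references the alias. A stripped-down sketch of the same shape over a hypothetical node table (names are illustrative, not from the original module):

import sqlalchemy as sa

metadata = sa.MetaData()
node = sa.Table(
    "node", metadata,
    sa.Column("id", sa.Integer),
    sa.Column("parent_id", sa.Integer),
)

# Anchor: the root rows (no parent).
tree = (
    sa.select([node.c.id, node.c.parent_id])
    .where(node.c.parent_id == sa.null())
    .cte(name="tree", recursive=True)
)

# Recursive step: join children onto rows already collected in the CTE.
parent = tree.alias("parent")
tree = tree.union_all(
    sa.select([node.c.id, node.c.parent_id])
    .where(node.c.parent_id == parent.c.id)
)

# Compiles to: WITH RECURSIVE tree AS (SELECT ... UNION ALL SELECT ...) SELECT tree.id FROM tree
query = sa.select([tree.c.id])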
Example #36
    def get(self):
        query = self.get_argument("query", "")
        offset = int(self.get_argument("offset", 0))
        limit = int(self.get_argument("limit", 100))
        if limit > 9000:
            limit = 9000

        groups = (
            self.session.query(
                label("type", literal("Group")),
                label("id", Group.id),
                label("name", Group.groupname),
            )
            .filter(Group.enabled == True, Group.groupname.like("%{}%".format(query)))
            .subquery()
        )

        permissions = (
            self.session.query(
                label("type", literal("Permission")),
                label("id", Permission.id),
                label("name", Permission.name),
            )
            .filter(Permission.enabled == True, Permission.name.like("%{}%".format(query)))
            .subquery()
        )

        users = (
            self.session.query(
                label("type", literal("User")), label("id", User.id), label("name", User.username)
            )
            .filter(User.enabled == True, User.username.like("%{}%".format(query)))
            .subquery()
        )

        results_query = self.session.query("type", "id", "name").select_entity_from(
            union_all(users.select(), permissions.select(), groups.select())
        )
        total = results_query.count()
        results = results_query.offset(offset).limit(limit).all()

        if len(results) == 1:
            result = results[0]
            return self.redirect("/{}s/{}".format(result.type.lower(), result.name))

        self.render(
            "search.html",
            results=results,
            search_query=query,
            offset=offset,
            limit=limit,
            total=total,
        )
Example #37
 def invalid_query(self, session):
     cte = session.query(Languoid.pk,
             Languoid.pk.label('father_pk'), sa.literal(0).label('depth'))\
         .cte(recursive=True)
     parent = sa.orm.aliased(Languoid)
     cte = cte.union_all(session.query(cte.c.pk, parent.father_pk, cte.c.depth + 1)\
         .join(parent, cte.c.father_pk == parent.pk)\
         .filter(parent.father_pk != None))
     tree1 = session.query(TreeClosureTable.child_pk, TreeClosureTable.parent_pk, TreeClosureTable.depth)
     tree2 = session.query(cte.c.pk, cte.c.father_pk, cte.c.depth)
     diff = sa.union_all(tree1.except_all(tree2), tree2.except_all(tree1))
     return session.query(diff.alias())
Example #38
 def join_committee_queries(self, kwargs):
     queries = []
     total = 0
     for committee_id in kwargs['committee_id']:
         query, count = self.build_committee_query(kwargs, committee_id)
         queries.append(query.subquery().select())
         total += count
     query = models.db.session.query(models.ScheduleA).select_entity_from(
         sa.union_all(*queries))
     query = query.options(sa.orm.joinedload(models.ScheduleA.committee))
     query = query.options(sa.orm.joinedload(models.ScheduleA.contributor))
     return query, total
Example #39
def get_stats_query():
    # cf. https://glottolog.org/glottolog/glottologinformation

    def languoid_count(kind, cls=Languoid, fromclause=Languoid,
                       level=None, is_root=None):
        select_nrows = (select(sa.literal(kind).label('kind'),
                               sa.func.count().label('n'))
                        .select_from(fromclause))

        if level is not None:
            select_nrows = select_nrows.where(cls.level == level)

        if is_root is not None:
            cond = ((cls.parent == sa.null()) if is_root
                    else (cls.parent != sa.null()))
            select_nrows = select_nrows.where(cond)

        return select_nrows

    Root, Child, root_child = Languoid.parent_descendant(innerjoin='reflexive',
                                                         parent_root=True)

    language_count = functools.partial(languoid_count,
                                       cls=Child, fromclause=root_child,
                                       level=LANGUAGE)

    def iterselects():
        yield languoid_count('languoids')

        yield languoid_count('families', level=FAMILY, is_root=True)

        yield languoid_count('isolates', level=LANGUAGE, is_root=True)

        yield languoid_count('roots', is_root=True)

        yield languoid_count('languages', level=LANGUAGE)

        yield languoid_count('subfamilies', level=FAMILY, is_root=False)

        yield languoid_count('dialects', level=DIALECT)

        yield (language_count('Spoken L1 Languages')
               .where(Root.name.notin_(SPECIAL_FAMILIES + (BOOKKEEPING,))))

        for name in SPECIAL_FAMILIES:
            yield language_count(name).where(Root.name == name)

        yield language_count('All').where(Root.name != BOOKKEEPING)

        yield language_count(BOOKKEEPING).where(Root.name == BOOKKEEPING)

    return sa.union_all(*iterselects())
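The statement assembled from iterselects() is a single UNION ALL of one-row (kind, n) count selects. A hedged usage sketch; engine is an assumed SQLAlchemy engine and is not part of the original module:

# Hypothetical usage: execute the combined statement and collect kind -> count.
with engine.connect() as conn:
    stats = {kind: n for kind, n in conn.execute(get_stats_query())}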
Example #40
 def join_committee_queries(self, kwargs):
     """Build and compose per-committee subqueries using `UNION ALL`.
     """
     queries = []
     total = 0
     for committee_id in kwargs.get('committee_id', []):
         query, count = self.build_committee_query(kwargs, committee_id)
         queries.append(query.subquery().select())
         total += count
     query = models.db.session.query(self.model).select_entity_from(
         sa.union_all(*queries))
     query = query.options(*self.query_options)
     return query, total
Example #41
def glottocode(name, conn, codes=None):
    letters = slug(name)[:4].ljust(4, 'a')
    active = select([cast(func.substring(Languoid.id, 5), Integer).label('number')])\
        .where(Languoid.id.startswith(letters))
    legacy = select([cast(func.substring(LegacyCode.id, 5), Integer).label('number')])\
        .where(LegacyCode.id.startswith(letters))
    if not codes:
        known = union_all(active, legacy)
    else:
        dirty = select([cast(func.substring(literal_column('dirty'), 5), Integer).label('number')])\
            .select_from(func.unnest(list(codes)).alias('dirty'))\
            .where(literal_column('dirty').startswith(letters))
        known = union_all(active, legacy, dirty)
    number = conn.execute(select([func.coalesce(func.max(literal_column('number') + 1), 1234)])\
        .select_from(known.alias())).scalar()
    number = str(number)
    assert len(number) == 4
    res = letters + number
    assert GLOTTOCODE_PATTERN.match(res)
    if codes is not None:
        codes[res] = True
    return res
Example #42
def _select_device_id_context_ids_sub_query(
    start_day: dt,
    end_day: dt,
    event_types: tuple[str, ...],
    json_quotable_device_ids: list[str],
) -> CompoundSelect:
    """Generate a subquery to find context ids for multiple devices."""
    return select(
        union_all(
            select_events_context_id_subquery(
                start_day, end_day, event_types).where(
                    apply_event_device_id_matchers(json_quotable_device_ids)),
        ).c.context_id)
Example #43
def make_day_period_cte(days, period_list, default_endless=True):
    """Create a CTE (common table expr) that maps a date to a period ID.

    Provide a list of days to include in the CTE and the candidate periods.

    Return a tuple:
    - day_periods: [(day, period_id)]
    - day_period_cte
    - missing_period, a boolean that is true when some of the days
      don't map to any period.
    """
    # Choose a period for the movements, entries, or recos on a given date.
    day_periods = []  # [(date, period_id)]
    missing_period = False
    for day in days:
        period = get_period_for_day(period_list,
                                    day,
                                    default_endless=default_endless)
        if period is None:
            missing_period = True
        else:
            day_periods.append((day, period.id))

    if day_periods:
        # Turn day_periods into day_period_cte, a common table expression
        # that contains a simple mapping of date to period ID.
        # See: https://stackoverflow.com/questions/44140632

        # Optimization to reduce the size of the statement:
        # Type cast only for first row;
        # for other rows the database will infer.
        stmts = [
            select([
                cast(literal(d), Date).label('day'),
                cast(literal(pid), BigInteger).label('period_id'),
            ]) for (d, pid) in day_periods[:1]
        ]
        stmts.extend(
            select([literal(d), literal(pid)]) for (d, pid) in day_periods[1:])
        day_period_cte = union_all(*stmts).cte(name='day_period_cte')

    else:
        # There are no periods for any of the days.
        # Use a table with zero rows as day_period_cte.
        day_period_cte = (select([
            cast(literal(None), Date).label('day'),
            cast(literal(None), BigInteger).label('period_id'),
        ]).where(literal(1) == literal(0)).cte(name='day_period_cte'))

    return day_periods, day_period_cte, missing_period
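A hedged usage sketch of the returned day_period_cte: joining it on its day column attaches a period_id to each dated row. The movement table below is purely illustrative and not part of the original code:

from sqlalchemy import BigInteger, Column, Date, MetaData, Numeric, Table, select

metadata = MetaData()
movement = Table(
    "movement", metadata,  # hypothetical table, for illustration only
    Column("id", BigInteger),
    Column("date", Date),
    Column("delta", Numeric),
)

def movements_with_periods(day_period_cte):
    """Map each movement's date to a period_id via the CTE built above."""
    return (
        select([movement.c.id, movement.c.delta, day_period_cte.c.period_id])
        .select_from(
            movement.join(
                day_period_cte,
                day_period_cte.c.day == movement.c.date,
            )
        )
    )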
Example #44
 def invalid_query(self, session):
     cte = session.query(Languoid.pk,
             Languoid.pk.label('father_pk'), sa.literal(0).label('depth'))\
         .cte(recursive=True)
     parent = sa.orm.aliased(Languoid)
     cte = cte.union_all(session.query(cte.c.pk, parent.father_pk, cte.c.depth + 1)\
         .join(parent, cte.c.father_pk == parent.pk)\
         .filter(parent.father_pk != None))
     tree1 = session.query(TreeClosureTable.child_pk,
                           TreeClosureTable.parent_pk,
                           TreeClosureTable.depth)
     tree2 = session.query(cte.c.pk, cte.c.father_pk, cte.c.depth)
     diff = sa.union_all(tree1.except_all(tree2), tree2.except_all(tree1))
     return session.query(diff.alias())
Example #45
    def test_union_all(self):
        e = union_all(select([t1.c.col3]),
                      union(
                          select([t1.c.col3]),
                          select([t1.c.col3]),
                      ))

        wanted = [('aaa', ), ('aaa', ), ('bbb', ), ('bbb', ), ('ccc', ),
                  ('ccc', )]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(e.alias('foo').select().execute())
        eq_(found2, wanted)
Example #46
 def join_committee_queries(self, kwargs):
     queries = []
     total = 0
     for committee_id in kwargs['committee_id']:
         query, count = self.build_committee_query(kwargs, committee_id)
         queries.append(query.subquery().select())
         total += count
     query = models.db.session.query(
         models.ScheduleA
     ).select_entity_from(
         sa.union_all(*queries)
     )
     query = query.options(sa.orm.joinedload(models.ScheduleA.committee))
     query = query.options(sa.orm.joinedload(models.ScheduleA.contributor))
     return query, total
Example #47
    def test_union_all(self):
        e = union_all(
            select([t1.c.col3]),
            union(
                select([t1.c.col3]),
                select([t1.c.col3]),
            )
        )

        wanted = [('aaa',), ('aaa',), ('bbb',), ('bbb',), ('ccc',), ('ccc',)]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(e.alias('foo').select().execute())
        eq_(found2, wanted)
Example #48
 def join_committee_queries(self, kwargs):
     """Build and compose per-committee subqueries using `UNION ALL`.
     """
     queries = []
     total = 0
     for committee_id in kwargs.get('committee_id', []):
         query, count = self.build_committee_query(kwargs, committee_id)
         queries.append(query.subquery().select())
         total += count
     query = models.db.session.query(
         self.model
     ).select_entity_from(
         sa.union_all(*queries)
     )
     query = query.options(*self.query_options)
     return query, total
Example #49
def argos_unchecked_list(request):
    """Returns the unchecked sensor data summary.
    """
    # SQL query
    unchecked = union_all(
        select([
            Argos.pk,
            Argos.ptt.label('ptt'),
            Argos.date,
            literal('argos/gps').label('type')
        ]).where(Argos.checked == False),
        select([
            Gps.pk,
            Gps.ptt.label('ptt'),
            Gps.date,
            literal('argos/gps').label('type')
        ]).where(Gps.checked == False),
        select([
            Gsm.pk_id,
            Gsm.fk_ptt.label('ptt'),
            Gsm.date,
            literal('gsm').label('type')
        ]).where(Gsm.checked == False)
    ).alias()
    # Add the bird associated with each ptt.
    pie = ProtocolIndividualEquipment
    unchecked_with_ind = select([
        pie.ind_id.label('ind_id'),
        'ptt',
        func.count().label('count'),
        'type'
    ]).select_from(
        unchecked.join(SatTrx, SatTrx.ptt == unchecked.c.ptt)
        .outerjoin(
            pie,
            and_(SatTrx.id == pie.sat_id,
                 unchecked.c.date >= pie.begin_date,
                 or_(
                     unchecked.c.date < pie.end_date,
                     pie.end_date == None
                )
            )
        )
    ).group_by('ptt', 'type', pie.ind_id)#.order_by('ptt')
    # Populate Json array
    data = DBSession.execute(unchecked_with_ind).fetchall()
    return [dict(row) for row in data]
Example #50
    def test_union_all_lightweight(self):
        """like test_union_all, but breaks the sub-union into
        a subquery with an explicit column reference on the outside,
        more palatable to a wider variety of engines.

        """

        u = union(select([t1.c.col3]), select([t1.c.col3])).alias()

        e = union_all(select([t1.c.col3]), select([u.c.col3]))

        wanted = [("aaa",), ("aaa",), ("bbb",), ("bbb",), ("ccc",), ("ccc",)]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(e.alias("foo").select().execute())
        eq_(found2, wanted)
Example #51
    def test_distinct(self):
        mapper(Item, items)
        mapper(Order, orders, properties={
            'items':relation(Item, secondary=order_items, lazy=True)
        })
        mapper(User, users, properties={
            'addresses':relation(mapper(Address, addresses), lazy=True),
            'orders':relation(Order, lazy=True)
        })

        sess = create_session()
        q = sess.query(User)

        # use a union all to get a lot of rows to join against
        u2 = users.alias('u2')
        s = sa.union_all(u2.select(use_labels=True), u2.select(use_labels=True), u2.select(use_labels=True)).alias('u')
        print([key for key in s.c.keys()])
Example #52
 def get_priorities(self):
     incomplete_builds = self.get_incomplete_builds_query()
     queries = self.get_priority_queries().values()
     union_query = union_all(*queries).alias('un')
     pkg_id = union_query.c.pkg_id
     current_priority = cast(func.sum(union_query.c.priority),
                             Integer).label('curr_priority')
     priorities = self.db.query(pkg_id, current_priority)\
                         .group_by(pkg_id).subquery()
     return self.db.query(Package.id, priorities.c.curr_priority *
                          Collection.priority_coefficient +
                          Package.manual_priority + Package.static_priority)\
                   .join(Package.collection)\
                   .join(priorities, Package.id == priorities.c.pkg_id)\
                   .filter((Package.resolved == True) |
                           (Package.resolved == None))\
                   .filter(Package.id.notin_(incomplete_builds.subquery()))\
                   .filter(Package.blocked == False)\
                   .filter(Package.tracked == True)\
                   .order_by(priorities.c.curr_priority.desc())\
                   .all()
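In plain arithmetic, the expression queried above combines the summed union priorities with the collection coefficient and the per-package offsets; a minimal sketch of that composition (the helper itself is hypothetical, the names mirror the columns).

def effective_priority(curr_priority, priority_coefficient,
                       manual_priority, static_priority):
    # curr_priority is SUM(priority) over the UNION ALL rows for one package.
    return (curr_priority * priority_coefficient
            + manual_priority + static_priority)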
Example #53
0
File: api.py Project: TvoroG/imh
 def get(self):
     last_vk = (Entity.query
                .filter_by(alien_site='vk')
                .order_by(Entity.id.desc())
                .limit(50)
                .subquery())
     last_instagram = (Entity.query
                       .filter_by(alien_site='instagram')
                       .order_by(Entity.id.desc())
                       .limit(50)
                       .subquery())
     last_twitter = (Entity.query
                     .filter_by(alien_site='twitter')
                     .order_by(Entity.id.desc())
                     .limit(50)
                     .subquery())
     last = (db.session.query(Entity)
             .select_entity_from(union_all(last_vk.select(),
                                           last_instagram.select(),
                                           last_twitter.select())))
     return {'entities': [l.serialize for l in last.all()]}
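The three per-site subqueries share one shape; a hedged sketch of the same idea written as a loop (illustrative only, not part of the original handler):

# Illustrative variant: build one "last N" subquery per site, then UNION ALL them.
def last_entities_query(sites=('vk', 'instagram', 'twitter'), limit=50):
    subqueries = [
        Entity.query
        .filter_by(alien_site=site)
        .order_by(Entity.id.desc())
        .limit(limit)
        .subquery()
        for site in sites
    ]
    return (db.session.query(Entity)
            .select_entity_from(union_all(*[s.select() for s in subqueries])))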
Example #54
0
  def _similar_asmnt_assessment(cls, type_, id_):
    """Find similar Assessments for Assessment object.

    Args:
        type_: Assessment type.
        id_: Assessment id.

    Returns:
        SQLAlchemy query that yields rows [(similar_id,)] - the ids of
        similar objects.
    """
    from ggrc.models import all_models
    asmnt = all_models.Assessment

    asmnt_mapped = cls.mapped_to_assessment([id_]).subquery()
    # Find Assessments directly mapped to Snapshot of same object
    similar_queries = cls.mapped_to_obj_snapshot(
        asmnt_mapped.c.obj_type, asmnt_mapped.c.obj_id
    )

    # Find Assessments mapped to Snapshot of object mapped to base object
    # Object1 <-> Object2 <-> Snapshot of Object2 <-> Assessment
    mapped_obj = cls.mapped_objs(
        asmnt_mapped.c.obj_type, asmnt_mapped.c.obj_id, True
    )
    similar_queries += cls.mapped_to_obj_snapshot(
        mapped_obj.c.obj_type, mapped_obj.c.obj_id
    )

    similar_objs = sa.union_all(*similar_queries).alias("scoped_similar")
    return db.session.query(similar_objs.c.similar_id).join(
        asmnt,
        sa.and_(
            asmnt.assessment_type == similar_objs.c.related_type,
            asmnt.id == similar_objs.c.similar_id,
        )
    ).filter(
        asmnt.id != id_,
        similar_objs.c.similar_type == type_,
    )
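A hedged usage sketch showing how the returned query might be consumed; the type and id below are invented.

# Hypothetical usage (from inside the owning class, hence `cls`):
# ids of Assessments similar to Assessment 42.
similar_ids = [
    row.similar_id
    for row in cls._similar_asmnt_assessment("Assessment", 42)
]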
Example #55
0
  def mapped_objs(cls, object_type, object_id, same_type_mapped=False):
    """Find all instances that have relationship with provided object.

    Args:
        object_type: Type of object (can be str or SQLAlchemy property).
        object_id: Id of object (can be int or SQLAlchemy property).
        same_type_mapped: If True, only related objects of the same type
            will be searched.

    Returns:
        SQLAlchemy query with id and type of found
        objects [(obj_id, obj_type)].
    """
    source_rel = db.session.query(
        Relationship.source_id.label("obj_id"),
        Relationship.source_type.label("obj_type"),
        Relationship.destination_type.label("base_type"),
    ).filter(
        Relationship.destination_type == object_type,
        Relationship.destination_id == object_id,
    )

    destination_rel = db.session.query(
        Relationship.destination_id,
        Relationship.destination_type,
        Relationship.source_type,
    ).filter(
        Relationship.source_type == object_type,
        Relationship.source_id == object_id,
    )

    if same_type_mapped:
      source_rel = source_rel.filter(
          Relationship.source_type == Relationship.destination_type
      )
      destination_rel = destination_rel.filter(
          Relationship.source_type == Relationship.destination_type
      )
    return sa.union_all(source_rel, destination_rel).alias("mapped_related")
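A hedged usage sketch: the alias returned above behaves like any other selectable, so its labelled columns can be queried directly (the object type and id below are invented).

# Hypothetical usage: everything related to Assessment 42, as (id, type) pairs.
mapped = cls.mapped_objs("Assessment", 42)
related = db.session.query(mapped.c.obj_id, mapped.c.obj_type).all()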
Example #56
0
def argos_unchecked_list(request):
    """Returns the unchecked Argos data summary.
    """
    # SQL query
    unchecked = union_all(
        select([
            Argos.pk,
            Argos.ptt.label('ptt'),
            Argos.date
        ]).where(Argos.checked == False),
        select([
            Gps.pk,
            Gps.ptt.label('ptt'),
            Gps.date
        ]).where(Gps.checked == False)
    ).alias()
    # Add the bird associated to each ptt.
    pie = ProtocolIndividualEquipment
    unchecked_with_ind = select([
        pie.ind_id.label('ind_id'),
        'ptt',
        func.count().label('nb')
    ]).select_from(
        unchecked.join(SatTrx, SatTrx.ptt == unchecked.c.ptt)
        .outerjoin(
            pie,
            and_(SatTrx.id == pie.sat_id,
                 unchecked.c.date >= pie.begin_date,
                 or_(
                     unchecked.c.date < pie.end_date,
                     pie.end_date == None
                )
            )
        )
    ).group_by('ptt', pie.ind_id).order_by('ptt')
    # Populate Json array
    data = DBSession.execute(unchecked_with_ind).fetchall()
    return [{'ptt': ptt, 'ind_id': ind_id, 'count': nb} for ind_id, ptt, nb in data]
Example #57
0
    query=union_all(
        # Priority 20: valid case (interface's mac w/ vlan at correct ports)
        # <mac> @ <switch>/<port> → <vlan>_[un]tagged (Prio 20)
        # Parsing continues because of Fall-Through:=Yes
        Query([
            Interface.mac.label('UserName'),
            # `host()` does not print the `/32` like `text` would
            func.host(Switch.management_ip).label('NASIPAddress'),
            SwitchPort.name.label('NASPortId'),
            # TODO: add `_tagged` instead if interface needs that
            (VLAN.name + '_untagged').label('GroupName'),
            literal(20).label('Priority'),
        ]).select_from(User)
        .join(Host)
        .join(Interface)
        .join(Host.room)
        .join(Room.connected_patch_ports)
        .join(SwitchPort)
        .join(Switch)
        .join(Interface.ips)
        .join(Subnet)
        .join(VLAN)
        .join(User.current_properties)
        .filter(CurrentProperty.property_name == 'network_access')
        .statement,

        # Priority -10: Blocking reason exists
        # <mac> @ <switch>/<port> → <blocking_group> (Prio -10)
        # Note that Fall-Through:=No for blocking groups, so first match terminates
        Query([
            Interface.mac.label('UserName'),
            func.host(Switch.management_ip).label('NASIPAddress'),
            SwitchPort.name.label('NASPortId'),
            radius_property.c.property.label('GroupName'),
            literal(-10).label('Priority'),
        ]).select_from(User)
        .join(Host)
        .join(Host.interfaces)
        .join(Host.room)
        .join(Room.connected_patch_ports)
        .join(SwitchPort)
        .join(Switch)
        .join(User.current_properties)
        .join(radius_property,
              radius_property.c.property == CurrentProperty.property_name)
        .statement,

        # Priority 0: No blocking reason exists → generic error group `no_network_access`
        Query([
            Interface.mac.label('UserName'),
            func.host(Switch.management_ip).label('NASIPAddress'),
            SwitchPort.name.label('NASPortId'),
            literal('no_network_access').label('GroupName'),
            literal(0).label('Priority'),
        ]).select_from(User)
        .outerjoin(network_access_subq, User.id == network_access_subq.c.user_id)
        .filter(network_access_subq.c.network_access == None)
        .join(User.hosts)
        .join(Host.interfaces)
        .join(Host.room)
        .join(Room.connected_patch_ports)
        .join(SwitchPort)
        .join(Switch)
        .statement,
    ),
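For orientation, a hedged illustration of the rows such a union might produce for one granted and one blocked interface; every value below is invented, and the comments restate the fall-through notes above.

# Illustrative rows (UserName, NASIPAddress, NASPortId, GroupName, Priority);
# all values invented:
example_rows = [
    ('00:de:ad:be:ef:00', '10.0.0.2', 'A23', 'Office_untagged', 20),      # valid access, falls through
    ('00:de:ad:be:ef:01', '10.0.0.2', 'A24', 'payment_in_default', -10),  # blocking group, terminates
    ('00:de:ad:be:ef:01', '10.0.0.2', 'A24', 'no_network_access', 0),     # generic fallback row
]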
Example #58
0
def get_queues():
    queue_names = db.session.query(DataSource.queue_name).distinct()
    scheduled_queue_names = db.session.query(DataSource.scheduled_queue_name).distinct()
    query = db.session.execute(union_all(queue_names, scheduled_queue_names))

    return ['celery'] + [row[0] for row in query]
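Because the two DISTINCT queries are combined with UNION ALL, a name used both as queue_name and as scheduled_queue_name can appear twice in the result; a hedged sketch of one way a caller might deduplicate while preserving order (illustrative only):

# Illustrative helper: drop duplicate queue names while keeping their order.
def unique_queues(names):
    seen = set()
    ordered = []
    for name in names:
        if name not in seen:
            seen.add(name)
            ordered.append(name)
    return ordered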
Example #59
0
def cascade_unmappable(exp, object_class, target_class, query):
  """Special operator to get the effect of cascade unmap of Issue from Asmt."""
  issue_id = exp["issue"].get("id")
  assessment_id = exp["assessment"].get("id")

  if not issue_id:
    raise BadQueryException("Missing 'id' key in 'issue': {}"
                            .format(exp["issue"]))
  if not assessment_id:
    raise BadQueryException("Missing 'id' key in 'assessment': {}"
                            .format(exp["assessment"]))

  if object_class.__name__ not in {"Audit", "Snapshot"}:
    raise BadQueryException("'cascade_unmapping' can't be applied to {}"
                            .format(object_class.__name__))

  mapped_to_issue = aliased(sqlalchemy.union_all(
      db.session.query(
          all_models.Relationship.destination_id.label("target_id"),
      ).filter(
          all_models.Relationship.source_id == issue_id,
          all_models.Relationship.source_type == "Issue",
          all_models.Relationship.destination_type == object_class.__name__,
          ~all_models.Relationship.automapping_id.is_(None),
      ),
      db.session.query(
          all_models.Relationship.source_id.label("target_id"),
      ).filter(
          all_models.Relationship.destination_id == issue_id,
          all_models.Relationship.destination_type == "Issue",
          all_models.Relationship.source_type == object_class.__name__,
      ),
  ), name="mapped_to_issue")

  mapped_to_assessment = aliased(sqlalchemy.union_all(
      db.session.query(
          all_models.Relationship.destination_id.label("target_id"),
      ).filter(
          all_models.Relationship.source_id == assessment_id,
          all_models.Relationship.source_type == "Assessment",
          all_models.Relationship.destination_type == object_class.__name__,
      ),
      db.session.query(
          all_models.Relationship.source_id.label("target_id"),
      ).filter(
          all_models.Relationship.destination_id == assessment_id,
          all_models.Relationship.destination_type == "Assessment",
          all_models.Relationship.source_type == object_class.__name__,
      ),
  ), "mapped_to_assessment")

  other_assessments = aliased(sqlalchemy.union_all(
      db.session.query(
          all_models.Relationship.destination_id.label("assessment_id"),
      ).filter(
          all_models.Relationship.source_id == issue_id,
          all_models.Relationship.source_type == "Issue",
          all_models.Relationship.destination_id != assessment_id,
          all_models.Relationship.destination_type == "Assessment",
      ),
      db.session.query(
          all_models.Relationship.source_id.label("assessment_id"),
      ).filter(
          all_models.Relationship.destination_id == issue_id,
          all_models.Relationship.destination_type == "Issue",
          all_models.Relationship.source_id != assessment_id,
          all_models.Relationship.source_type == "Assessment",
      ),
  ), "other_assessments")

  mapped_to_other_assessments = aliased(sqlalchemy.union_all(
      db.session.query(
          all_models.Relationship.destination_id.label("target_id"),
      ).filter(
          all_models.Relationship.source_id.in_(other_assessments),
          all_models.Relationship.source_type == "Assessment",
          all_models.Relationship.destination_type == object_class.__name__,
      ),
      db.session.query(
          all_models.Relationship.source_id.label("target_id"),
      ).filter(
          all_models.Relationship.destination_id.in_(other_assessments),
          all_models.Relationship.destination_type == "Assessment",
          all_models.Relationship.source_type == object_class.__name__,
      ),
  ), "mapped_to_other_assessments")

  result = set(db.session.query(mapped_to_issue))
  result &= set(db.session.query(mapped_to_assessment))
  result -= set(db.session.query(mapped_to_other_assessments))

  if not result:
    return sqlalchemy.sql.false()

  return object_class.id.in_([row[0] for row in result])
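A hedged usage sketch: the expression returned above is meant to be dropped into a filter on the target query; the ids and the expression dict below are invented for illustration.

# Hypothetical usage: restrict an Audit query to objects that would be
# cascade-unmapped when Issue 10 is unmapped from Assessment 20.
exp = {"issue": {"id": 10}, "assessment": {"id": 20}}
audits = db.session.query(all_models.Audit)
audits = audits.filter(
    cascade_unmappable(exp, all_models.Audit, None, audits)
)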