class BoardWithColumnsQueryFilter:
    board_alias = alias(Board)
    columns_alias = alias(BoardColumn)

    @classmethod
    def base_query(cls):

        join = cls.board_alias.outerjoin(
            cls.columns_alias,
            cls.board_alias.c.id == cls.columns_alias.c.board_id)

        query = sa.select([
            cls.board_alias.c.uuid.label("board_uuid"),
            cls.board_alias.c.name.label("board_name"),
            cls.columns_alias.c.name.label("column_name"),
            cls.columns_alias.c.uuid.label("column_uuid"),
        ]).select_from(join)

        return query

    @classmethod
    def board_with_columns_query(cls, board_uuid):

        query = cls.base_query().where(cls.board_alias.c.uuid == board_uuid)

        return query
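A minimal usage sketch (hedged: the engine object and the UUID value here are assumptions, not part of the original class):

query = BoardWithColumnsQueryFilter.board_with_columns_query("a-board-uuid")
with engine.connect() as conn:
    # One row per (board, column) pair; the column fields come back NULL
    # for boards without columns, because of the outer join.
    rows = conn.execute(query).fetchall()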
Example #2
def get_property_mod_flags_query(
    table,
    tracked_columns,
    mod_suffix='_mod',
    end_tx_column_name='end_transaction_id',
    tx_column_name='transaction_id',
):
    v1 = sa.alias(table, name='v')
    v2 = sa.alias(table, name='v2')
    primary_keys = [c.name for c in table.c if c.primary_key]

    return sa.select(
        columns=[
            getattr(v1.c, column)
            for column in primary_keys
        ] + [
            (sa.or_(
                getattr(v1.c, column) != getattr(v2.c, column),
                getattr(v2.c, tx_column_name).is_(None)
            )).label(column + mod_suffix)
            for column in tracked_columns
        ],
        from_obj=v1.outerjoin(
            v2,
            sa.and_(
                getattr(v2.c, end_tx_column_name) ==
                getattr(v1.c, tx_column_name),
                *[
                    getattr(v2.c, pk) == getattr(v1.c, pk)
                    for pk in primary_keys
                    if pk != tx_column_name
                ]
            )
        )
    ).order_by(getattr(v1.c, tx_column_name))
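The query outer-joins the version table to itself (v2 is the predecessor version of the same row) and emits one boolean <column>_mod flag per tracked column: true when the value changed, or when the row has no predecessor. A hedged sketch of a call, using the same SQLAlchemy 1.x select() API as the example and an invented article_version table:

import sqlalchemy as sa

metadata = sa.MetaData()
article_version = sa.Table(
    'article_version', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('transaction_id', sa.Integer, primary_key=True),
    sa.Column('name', sa.String),
    sa.Column('end_transaction_id', sa.Integer),
)

# Selects id, transaction_id and a boolean name_mod column,
# ordered by transaction_id.
flags_query = get_property_mod_flags_query(article_version, ['name'])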
Example #3
class BoardParticipantsFilterQuery(AbstractBaseFilter):
    user_alias = alias(User, "users")
    user_in_board_alias = alias(UserOnBoard, "user_boards")
    board_alias = alias(Board)

    @classmethod
    def filter_user_uuid(cls, request: Request, query: Select, value: str):
        return query.where(cls.user_alias.c.uuid == value)

    @classmethod
    def filter_by_role(cls, request: Request, query: Select, value: str):
        return query.where(cls.user_in_board_alias.c.role == value)

    @classmethod
    def base_query(cls, board_uuid: str) -> Select:
        join = cls.user_in_board_alias.outerjoin(
            cls.board_alias, cls.user_in_board_alias.c.board_id ==
            cls.board_alias.c.id).outerjoin(
                cls.user_alias,
                cls.user_in_board_alias.c.user_id == cls.user_alias.c.id)

        query = (sa.select([
            cls.user_alias,
            cls.board_alias.c.uuid.label("board_uuid"),
            cls.user_in_board_alias.c.role,
        ]).select_from(join).where(cls.board_alias.c.uuid == board_uuid))

        return query

    uuid = filter_user_uuid
    role = filter_by_role
Example #5
def teams():

    team = alias(Team)
    subteam = alias(Team)

    query = db.session.query(team.c.id.label('id_1'),
                             team.c.name.label('name_1')) \
                      .filter(team.c.parent_id.is_(None))

    teams = get_team_hierarchy(query, team, subteam)

    search_term = request.args.get('search')

    if search_term:
        teams = teams.subquery(with_labels=True)
        name_cols = [c for c in teams.c if 'name' in c.name]
        teams = db.session.query(*(c.label(c.name) for c in teams.c)) \
                          .filter(or_(*(c.ilike(f'%%{search_term}%%')
                                        for c in name_cols)))

    if not teams.count():
        return jsonify([])

    teams = jsonify_teams(teams)
    teams = to_dict(teams)
    teams = denullify_teams(teams)

    return jsonify(teams)
Example #6
def get_end_tx_column_query(table,
                            end_tx_column_name='end_transaction_id',
                            tx_column_name='transaction_id'):

    v1 = sa.alias(table, name='v')
    v2 = sa.alias(table, name='v2')
    v3 = sa.alias(table, name='v3')

    primary_keys = [c.name for c in table.c if c.primary_key]

    tx_criterion = sa.select([sa.func.min(getattr(
        v3.c, tx_column_name))]).where(
            sa.and_(
                getattr(v3.c, tx_column_name) > getattr(v1.c, tx_column_name),
                *[
                    getattr(v3.c, pk) == getattr(v1.c, pk)
                    for pk in primary_keys if pk != tx_column_name
                ]))
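    # tx_criterion: the smallest later transaction_id of the same row, built
    # as a correlated scalar subquery over the third alias v3; below it is
    # matched against v2's transaction_id to pick out the "end" transaction.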
    return sa.select(
        columns=[getattr(v1.c, column) for column in primary_keys] +
        [getattr(v2.c, tx_column_name).label(end_tx_column_name)],
        from_obj=v1.outerjoin(
            v2,
            sa.and_(getattr(v2.c, tx_column_name) == tx_criterion))).order_by(
                getattr(v1.c, tx_column_name))
Example #7
async def select_order_by_id(conn, order_id):
    in_tx = sa.alias(Tx, name="in_tx")
    out_tx = sa.alias(Tx, name="out_tx")
    # One alias of Tx per direction, joined on plain FK comparisons
    # (a .label() has no effect inside a join's ON clause).
    j_in = join(Order, in_tx, Order.in_tx == in_tx.c.id)
    j_out = join(j_in, out_tx, Order.out_tx == out_tx.c.id)

    where = Order.id == order_id

    q = (select([
        Order.order_type,
        in_tx.c.coin.label("in_tx_coin"),
        in_tx.c.tx_id.label("in_tx_hash"),
        in_tx.c.from_address.label("in_tx_from"),
        in_tx.c.to_address.label("in_tx_to"),
        in_tx.c.amount.label("in_tx_amount"),
        in_tx.c.created_at.label("in_tx_created_at"),
        in_tx.c.error.label("in_tx_error"),
        in_tx.c.confirmations.label("in_tx_confirmations"),
        in_tx.c.max_confirmations.label("in_tx_max_confirmations"),
        out_tx.c.coin.label("out_tx_coin"),
        out_tx.c.tx_id.label("out_tx_hash"),
        out_tx.c.from_address.label("out_tx_from"),
        out_tx.c.to_address.label("out_tx_to"),
        out_tx.c.amount.label("out_tx_amount"),
        out_tx.c.created_at.label("out_tx_created_at"),
        out_tx.c.error.label("out_tx_error"),
        out_tx.c.confirmations.label("out_tx_confirmations"),
        out_tx.c.max_confirmations.label("out_tx_max_confirmations"),
    ]).select_from(j_out).where(where))
    cursor = await conn.execute(q)
    order = await cursor.fetchone()
    return order
Example #8
    def _peaks_query(self, session):
        anode = alias(ChromatogramTreeNode.__table__)
        bnode = alias(ChromatogramTreeNode.__table__)
        apeak = alias(DeconvolutedPeak.__table__)

        peak_join = apeak.join(
            ChromatogramTreeNodeToDeconvolutedPeak,
            ChromatogramTreeNodeToDeconvolutedPeak.c.peak_id == apeak.c.id)

        root_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id).join(
            ChromatogramToChromatogramTreeNode,
            ChromatogramToChromatogramTreeNode.c.node_id == anode.c.id)

        branch_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id).join(
            ChromatogramTreeNodeBranch,
            ChromatogramTreeNodeBranch.child_id == anode.c.id).join(
            bnode, ChromatogramTreeNodeBranch.parent_id == bnode.c.id).join(
            ChromatogramToChromatogramTreeNode,
            ChromatogramToChromatogramTreeNode.c.node_id == bnode.c.id)

        branch_ids = select([apeak.c.id]).where(
            ChromatogramToChromatogramTreeNode.c.chromatogram_id == self.id
        ).select_from(branch_peaks_join)

        root_ids = select([apeak.c.id]).where(
            ChromatogramToChromatogramTreeNode.c.chromatogram_id == self.id
        ).select_from(root_peaks_join)

        all_ids = root_ids.union_all(branch_ids)
        peaks = session.execute(all_ids).fetchall()
        return {p[0] for p in peaks}
Example #9
def _get_allocations_by_provider_id(ctx, rp_id):
    allocs = sa.alias(_ALLOC_TBL, name="a")
    consumers = sa.alias(_CONSUMER_TBL, name="c")
    projects = sa.alias(_PROJECT_TBL, name="p")
    users = sa.alias(_USER_TBL, name="u")
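    # Roughly the SQL assembled below (a sketch; table names inferred from
    # the _ALLOC_TBL/_CONSUMER_TBL/_PROJECT_TBL/_USER_TBL aliases):
    # SELECT a.id, a.resource_class_id, a.used, ...,
    #        COALESCE(c.uuid, a.consumer_id) AS consumer_uuid, ...
    # FROM allocations AS a
    #   JOIN consumers AS c ON a.consumer_id = c.uuid
    #   JOIN projects AS p ON c.project_id = p.id
    #   JOIN users AS u ON c.user_id = u.id
    # WHERE a.resource_provider_id = :rp_id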
    cols = [
        allocs.c.id,
        allocs.c.resource_class_id,
        allocs.c.used,
        allocs.c.updated_at,
        allocs.c.created_at,
        consumers.c.id.label("consumer_id"),
        consumers.c.generation.label("consumer_generation"),
        sql.func.coalesce(consumers.c.uuid,
                          allocs.c.consumer_id).label("consumer_uuid"),
        projects.c.id.label("project_id"),
        projects.c.external_id.label("project_external_id"),
        users.c.id.label("user_id"),
        users.c.external_id.label("user_external_id"),
    ]
    # TODO(jaypipes): change this join to be on ID not UUID
    consumers_join = sa.join(allocs, consumers,
                             allocs.c.consumer_id == consumers.c.uuid)
    projects_join = sa.join(consumers_join, projects,
                            consumers.c.project_id == projects.c.id)
    users_join = sa.join(projects_join, users,
                         consumers.c.user_id == users.c.id)
    sel = sa.select(cols).select_from(users_join)
    sel = sel.where(allocs.c.resource_provider_id == rp_id)

    return [dict(r) for r in ctx.session.execute(sel)]
Example #10
    def test_double(self):
        """tests lazy loading with two relationships simulatneously,
        from the same table, using aliases.  """

        users, orders, User, Address, Order, addresses = (
            self.tables.users, self.tables.orders, self.classes.User,
            self.classes.Address, self.classes.Order, self.tables.addresses)

        openorders = sa.alias(orders, 'openorders')
        closedorders = sa.alias(orders, 'closedorders')

        mapper(Address, addresses)

        mapper(Order, orders)

        open_mapper = mapper(Order, openorders, non_primary=True)
        closed_mapper = mapper(Order, closedorders, non_primary=True)
        mapper(User,
               users,
               properties=dict(addresses=relationship(Address, lazy=True),
                               open_orders=relationship(
                                   open_mapper,
                                   primaryjoin=sa.and_(
                                       openorders.c.isopen == 1,
                                       users.c.id == openorders.c.user_id),
                                   lazy='select'),
                               closed_orders=relationship(
                                   closed_mapper,
                                   primaryjoin=sa.and_(
                                       closedorders.c.isopen == 0,
                                       users.c.id == closedorders.c.user_id),
                                   lazy='select')))
        q = create_session().query(User)

        assert [
            User(id=7,
                 addresses=[Address(id=1)],
                 open_orders=[Order(id=3)],
                 closed_orders=[Order(id=1), Order(id=5)]),
            User(id=8,
                 addresses=[Address(id=2),
                            Address(id=3),
                            Address(id=4)],
                 open_orders=[],
                 closed_orders=[]),
            User(id=9,
                 addresses=[Address(id=5)],
                 open_orders=[Order(id=4)],
                 closed_orders=[Order(id=2)]),
            User(id=10)
        ] == q.all()

        sess = create_session()
        user = sess.query(User).get(7)
        eq_([Order(id=1), Order(id=5)],
            create_session().query(closed_mapper).with_parent(
                user, property='closed_orders').all())
        eq_([Order(id=3)],
            create_session().query(open_mapper).with_parent(
                user, property='open_orders').all())
Example #11
def create_ff_regression_df(session, team_stats_tbl, sched_tbl, ff_list):
    """Create and return a regression data frame of the four factors (ff) for each team in a matchup.

    Args:
        session: Sqlalchemy session object
        team_stats_tbl: mapped team stats table object
        sched_tbl: mapped schedule table object
        ff_list: List of the four-factor column names

    Returns:
         A data frame with home ('_h') and away ('_a') stats and the margin of victory (mov). The mov is the target
         for a regression. The '_h' and '_a' stats are the home and away four factors in a specific matchup.
    """
    home_stats = alias(team_stats_tbl, name='home')
    away_stats = alias(team_stats_tbl, name='away')
    sched = alias(sched_tbl, name='sched')
    home_stat_ff = [
        getattr(home_stats.c, col) for col in ff_list
        if col in home_stats.c.keys()
    ]
    away_stat_ff = [
        getattr(away_stats.c, col) for col in ff_list
        if col in away_stats.c.keys()
    ]
    sched_stats_query = session.query(sched, *home_stat_ff, *away_stat_ff).filter(sched.c['home_team_score'] > 0).\
        join(home_stats, home_stats.c['id'] == sched.c['home_stats_id']).\
        join(away_stats, away_stats.c['id'] == sched.c['away_stats_id']).subquery(with_labels=True)
    sched_stats = session.query(sched_stats_query)

    df = conversion.convert_sql_statement_to_table(session,
                                                   sched_stats.statement)
    return df
Example #12
def get_sharing_providers(ctx, rp_ids=None):
    """Returns a list of resource provider IDs (internal IDs, not UUIDs)
    that indicate that they share resource via an aggregate association.

    Shared resource providers are marked with a standard trait called
    MISC_SHARES_VIA_AGGREGATE. This indicates that the provider allows its
    inventory to be consumed by other resource providers associated via an
    aggregate link.

    For example, assume we have two compute nodes, CN_1 and CN_2, each with
    inventory of VCPU and MEMORY_MB but not DISK_GB (in other words, these are
    compute nodes with no local disk). There is a resource provider called
    "NFS_SHARE" that has an inventory of DISK_GB and has the
    MISC_SHARES_VIA_AGGREGATE trait. Both the "CN_1" and "CN_2" compute node
    resource providers and the "NFS_SHARE" resource provider are associated
    with an aggregate called "AGG_1".

    The scheduler needs to determine the resource providers that can fulfill a
    request for 2 VCPU, 1024 MEMORY_MB and 100 DISK_GB.

    Clearly, no single provider can satisfy the request for all three
    resources, since neither compute node has DISK_GB inventory and the
    NFS_SHARE provider has no VCPU or MEMORY_MB inventories.

    However, if we consider the NFS_SHARE resource provider as providing
    inventory of DISK_GB for both CN_1 and CN_2, we can include CN_1 and CN_2
    as potential fits for the requested set of resources.

    To facilitate that matching query, this function returns all providers that
    indicate they share their inventory with providers in some aggregate.

    :param rp_ids: When present, returned resource providers are limited to
                   only those in this value
    """
    # The SQL we need to generate here looks like this:
    #
    # SELECT rp.id
    # FROM resource_providers AS rp
    #   INNER JOIN resource_provider_traits AS rpt
    #     ON rp.id = rpt.resource_provider_id
    #     AND rpt.trait_id = ${"MISC_SHARES_VIA_AGGREGATE" trait id}
    # WHERE rp.id IN $(RP_IDs)

    sharing_trait = trait_obj.Trait.get_by_name(
        ctx, os_traits.MISC_SHARES_VIA_AGGREGATE)

    rp_tbl = sa.alias(_RP_TBL, name='rp')
    rpt_tbl = sa.alias(_RP_TRAIT_TBL, name='rpt')

    rp_to_rpt_join = sa.join(
        rp_tbl, rpt_tbl,
        sa.and_(rp_tbl.c.id == rpt_tbl.c.resource_provider_id,
                rpt_tbl.c.trait_id == sharing_trait.id))

    sel = sa.select([rp_tbl.c.id]).select_from(rp_to_rpt_join)
    if rp_ids:
        sel = sel.where(rp_tbl.c.id.in_(rp_ids))

    return set(r[0] for r in ctx.session.execute(sel))
Example #13
    def _as_array_query(self, session):
        anode = alias(ChromatogramTreeNode.__table__)
        bnode = alias(ChromatogramTreeNode.__table__)
        apeak = alias(DeconvolutedPeak.__table__)

        peak_join = apeak.join(
            ChromatogramTreeNodeToDeconvolutedPeak,
            ChromatogramTreeNodeToDeconvolutedPeak.c.peak_id == apeak.c.id)

        root_peaks_join = peak_join.join(
            anode, ChromatogramTreeNodeToDeconvolutedPeak.c.node_id ==
            anode.c.id).join(
                ChromatogramToChromatogramTreeNode,
                ChromatogramToChromatogramTreeNode.c.node_id == anode.c.id)

        branch_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id
        ).join(
            ChromatogramTreeNodeBranch,
            ChromatogramTreeNodeBranch.child_id == anode.c.id).join(
                bnode,
                ChromatogramTreeNodeBranch.parent_id == bnode.c.id).join(
                    ChromatogramToChromatogramTreeNode,
                    ChromatogramToChromatogramTreeNode.c.node_id == bnode.c.id)

        branch_intensities = select([
            apeak.c.intensity, anode.c.retention_time
        ]).where(ChromatogramToChromatogramTreeNode.c.chromatogram_id ==
                 self.id).select_from(branch_peaks_join)

        root_intensities = select([
            apeak.c.intensity, anode.c.retention_time
        ]).where(ChromatogramToChromatogramTreeNode.c.chromatogram_id ==
                 self.id).select_from(root_peaks_join)

        all_intensities_q = root_intensities.union_all(
            branch_intensities).order_by(anode.c.retention_time)

        all_intensities = session.execute(all_intensities_q).fetchall()

        time = []
        signal = []
        current_signal = all_intensities[0][0]
        current_time = all_intensities[0][1]

        # Merge peaks recorded at (numerically) the same retention time,
        # summing intensities into a single (time, signal) sample.
        for intensity, rt in all_intensities[1:]:
            if abs(current_time - rt) < 1e-4:
                current_signal += intensity
            else:
                time.append(current_time)
                signal.append(current_signal)
                current_time = rt
                current_signal = intensity
        time.append(current_time)
        signal.append(current_signal)
        return np.array(time), np.array(signal)
Example #14
def get_traits_by_provider_id(context, rp_id):
    t = sa.alias(_TRAIT_TBL, name='t')
    rpt = sa.alias(_RP_TRAIT_TBL, name='rpt')
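    # Roughly: SELECT t.id, t.name, t.created_at, t.updated_at
    #          FROM traits AS t JOIN resource_provider_traits AS rpt
    #            ON t.id = rpt.trait_id AND rpt.resource_provider_id = :rp_id
    # (table names are an assumption based on the _TRAIT_TBL/_RP_TRAIT_TBL
    # aliases)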

    join_cond = sa.and_(t.c.id == rpt.c.trait_id,
                        rpt.c.resource_provider_id == rp_id)
    join = sa.join(t, rpt, join_cond)
    sel = sa.select([t.c.id, t.c.name, t.c.created_at,
                     t.c.updated_at]).select_from(join)
    return [dict(r) for r in context.session.execute(sel).fetchall()]
Example #15
    def test_double(self):
        """Eager loading with two relationships simultaneously, 
            from the same table, using aliases."""

        openorders = sa.alias(orders, 'openorders')
        closedorders = sa.alias(orders, 'closedorders')

        mapper(Address, addresses)
        mapper(Order, orders)

        open_mapper = mapper(Order, openorders, non_primary=True)
        closed_mapper = mapper(Order, closedorders, non_primary=True)

        mapper(User,
               users,
               properties=dict(addresses=relationship(Address,
                                                      lazy='subquery',
                                                      order_by=addresses.c.id),
                               open_orders=relationship(
                                   open_mapper,
                                   primaryjoin=sa.and_(
                                       openorders.c.isopen == 1,
                                       users.c.id == openorders.c.user_id),
                                   lazy='subquery',
                                   order_by=openorders.c.id),
                               closed_orders=relationship(
                                   closed_mapper,
                                   primaryjoin=sa.and_(
                                       closedorders.c.isopen == 0,
                                       users.c.id == closedorders.c.user_id),
                                   lazy='subquery',
                                   order_by=closedorders.c.id)))

        q = create_session().query(User).order_by(User.id)

        def go():
            eq_([
                User(id=7,
                     addresses=[Address(id=1)],
                     open_orders=[Order(id=3)],
                     closed_orders=[Order(id=1), Order(id=5)]),
                User(id=8,
                     addresses=[Address(id=2),
                                Address(id=3),
                                Address(id=4)],
                     open_orders=[],
                     closed_orders=[]),
                User(id=9,
                     addresses=[Address(id=5)],
                     open_orders=[Order(id=4)],
                     closed_orders=[Order(id=2)]),
                User(id=10)
            ], q.all())

        self.assert_sql_count(testing.db, go, 4)
Example #18
    def _aj_lon_lat(self, s, new=True):

        start = to_time(self.nearby.start)
        finish = to_time(self.nearby.finish)

        agroup = s.query(ActivityGroup).filter(
            ActivityGroup.name == self.nearby.activity_group).one()
        lat = s.query(StatisticName.id). \
            filter(StatisticName.name == LATITUDE, StatisticName.constraint == agroup).scalar()
        lon = s.query(StatisticName.id). \
            filter(StatisticName.name == LONGITUDE, StatisticName.constraint == agroup).scalar()

        if not lat or not lon:
            log.warning(
                f'No {LATITUDE} or {LONGITUDE} in database for {agroup}')
            return

        sj_lat = inspect(StatisticJournal).local_table
        sj_lon = alias(inspect(StatisticJournal).local_table)
        sjf_lat = inspect(StatisticJournalFloat).local_table
        sjf_lon = alias(inspect(StatisticJournalFloat).local_table)
        aj = inspect(ActivityJournal).local_table
        ns = inspect(ActivitySimilarity).local_table

        existing_lo = select([ns.c.activity_journal_lo_id]). \
            where(ns.c.constraint == self.nearby.constraint)
        existing_hi = select([ns.c.activity_journal_hi_id]). \
            where(ns.c.constraint == self.nearby.constraint)
        existing = existing_lo.union(existing_hi).cte()

        stmt = select([sj_lat.c.source_id, sjf_lon.c.value, sjf_lat.c.value]). \
            select_from(sj_lat).select_from(sj_lon).select_from(sjf_lat).select_from(sjf_lon).select_from(aj). \
            where(and_(sj_lat.c.source_id == sj_lon.c.source_id,  # same source
                       sj_lat.c.time == sj_lon.c.time,            # same time
                       sj_lat.c.source_id == aj.c.id,             # and associated with an activity
                       aj.c.activity_group_id == agroup.id,       # of the right group
                       sj_lat.c.id == sjf_lat.c.id,               # lat sub-class
                       sj_lon.c.id == sjf_lon.c.id,               # lon sub-class
                       sj_lat.c.statistic_name_id == lat,         # lat name
                       sj_lon.c.statistic_name_id == lon,         # lon name
                       sj_lat.c.time >= start.timestamp(),        # time limits
                       sj_lat.c.time < finish.timestamp(),
                       sjf_lat.c.value > self.nearby.latitude - self.nearby.height / 2,
                       sjf_lat.c.value < self.nearby.latitude + self.nearby.height / 2,
                       sjf_lon.c.value > self.nearby.longitude - self.nearby.width / 2,
                       sjf_lon.c.value < self.nearby.longitude + self.nearby.width / 2))

        if new:
            stmt = stmt.where(func.not_(sj_lat.c.source_id.in_(existing)))
        else:
            stmt = stmt.where(sj_lat.c.source_id.in_(existing))
        stmt = stmt.order_by(sj_lat.c.source_id)  # needed for seen logic
        yield from s.connection().execute(stmt)
Example #20
def get_providers_with_resource(ctx, rc_id, amount, tree_root_id=None):
    """Returns a set of tuples of (provider ID, root provider ID) of providers
    that satisfy the request for a single resource class.

    :param ctx: Session context to use
    :param rc_id: Internal ID of resource class to check inventory for
    :param amount: Amount of resource being requested
    :param tree_root_id: An optional root provider ID. If provided, the results
                         are limited to the resource providers under the given
                         root resource provider.
    """
    # SELECT rp.id, rp.root_provider_id
    # FROM resource_providers AS rp
    # JOIN inventories AS inv
    #  ON rp.id = inv.resource_provider_id
    #  AND inv.resource_class_id = $RC_ID
    # LEFT JOIN (
    #  SELECT
    #    alloc.resource_provider_id,
    #    SUM(allocs.used) AS used
    #  FROM allocations AS alloc
    #  WHERE allocs.resource_class_id = $RC_ID
    #  GROUP BY allocs.resource_provider_id
    # ) AS usage
    #  ON inv.resource_provider_id = usage.resource_provider_id
    # WHERE
    #  used + $AMOUNT <= ((total - reserved) * inv.allocation_ratio)
    #  AND inv.min_unit <= $AMOUNT
    #  AND inv.max_unit >= $AMOUNT
    #  AND $AMOUNT % inv.step_size == 0
    rpt = sa.alias(_RP_TBL, name="rp")
    inv = sa.alias(_INV_TBL, name="inv")
    usage = _usage_select([rc_id])
    rp_to_inv = sa.join(
        rpt, inv,
        sa.and_(rpt.c.id == inv.c.resource_provider_id,
                inv.c.resource_class_id == rc_id))
    inv_to_usage = sa.outerjoin(
        rp_to_inv, usage,
        inv.c.resource_provider_id == usage.c.resource_provider_id)
    sel = sa.select([rpt.c.id, rpt.c.root_provider_id])
    sel = sel.select_from(inv_to_usage)
    where_conds = _capacity_check_clause(amount, usage, inv_tbl=inv)
    if tree_root_id is not None:
        where_conds = sa.and_(rpt.c.root_provider_id == tree_root_id,
                              where_conds)
    sel = sel.where(where_conds)
    res = ctx.session.execute(sel).fetchall()
    res = set((r[0], r[1]) for r in res)
    return res
Example #21
    def _adducts_query(self, session):
        anode = alias(ChromatogramTreeNode.__table__)
        bnode = alias(ChromatogramTreeNode.__table__)
        apeak = alias(DeconvolutedPeak.__table__)

        peak_join = apeak.join(
            ChromatogramTreeNodeToDeconvolutedPeak,
            ChromatogramTreeNodeToDeconvolutedPeak.c.peak_id == apeak.c.id)

        root_peaks_join = peak_join.join(
            anode, ChromatogramTreeNodeToDeconvolutedPeak.c.node_id ==
            anode.c.id).join(
                ChromatogramToChromatogramTreeNode,
                ChromatogramToChromatogramTreeNode.c.node_id == anode.c.id)

        branch_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id
        ).join(
            ChromatogramTreeNodeBranch,
            ChromatogramTreeNodeBranch.child_id == anode.c.id).join(
                bnode,
                ChromatogramTreeNodeBranch.parent_id == bnode.c.id).join(
                    ChromatogramToChromatogramTreeNode,
                    ChromatogramToChromatogramTreeNode.c.node_id == bnode.c.id)

        branch_node_info = select([
            anode.c.node_type_id, anode.c.retention_time
        ]).where(ChromatogramToChromatogramTreeNode.c.chromatogram_id ==
                 self.id).select_from(branch_peaks_join)

        root_node_info = select([
            anode.c.node_type_id, anode.c.retention_time
        ]).where(ChromatogramToChromatogramTreeNode.c.chromatogram_id ==
                 self.id).select_from(root_peaks_join)

        all_node_info_q = root_node_info.union_all(branch_node_info).order_by(
            anode.c.retention_time)

        all_node_info = session.execute(all_node_info_q).fetchall()

        node_type_ids = set()
        for node_type_id, rt in all_node_info:
            node_type_ids.add(node_type_id)

        node_types = []
        for ntid in node_type_ids:
            node_types.append(
                session.query(CompoundMassShift).get(ntid).convert())
        return node_types
Example #22
def anchors_for_sharing_providers(context, rp_ids, get_id=False):
    """Given a list of internal IDs of sharing providers, returns a set of
    tuples of (sharing provider UUID, anchor provider UUID), where each of
    anchor is the unique root provider of a tree associated with the same
    aggregate as the sharing provider. (These are the providers that can
    "anchor" a single AllocationRequest.)

    The sharing provider may or may not itself be part of a tree; in either
    case, an entry for this root provider is included in the result.

    If the sharing provider is not part of any aggregate, an empty set is
    returned.

    If get_id is True, it returns a set of tuples of (sharing provider ID,
    anchor provider ID) instead.
    """
    # SELECT sps.uuid, COALESCE(rps.uuid, shr_with_sps.uuid)
    # FROM resource_providers AS sps
    # INNER JOIN resource_provider_aggregates AS shr_aggs
    #   ON sps.id = shr_aggs.resource_provider_id
    # INNER JOIN resource_provider_aggregates AS shr_with_sps_aggs
    #   ON shr_aggs.aggregate_id = shr_with_sps_aggs.aggregate_id
    # INNER JOIN resource_providers AS shr_with_sps
    #   ON shr_with_sps_aggs.resource_provider_id = shr_with_sps.id
    # LEFT JOIN resource_providers AS rps
    #   ON shr_with_sps.root_provider_id = rps.id
    # WHERE sps.id IN $(RP_IDs)
    rps = sa.alias(_RP_TBL, name='rps')
    sps = sa.alias(_RP_TBL, name='sps')
    shr_aggs = sa.alias(_RP_AGG_TBL, name='shr_aggs')
    shr_with_sps_aggs = sa.alias(_RP_AGG_TBL, name='shr_with_sps_aggs')
    shr_with_sps = sa.alias(_RP_TBL, name='shr_with_sps')
    join_chain = sa.join(sps, shr_aggs,
                         sps.c.id == shr_aggs.c.resource_provider_id)
    join_chain = sa.join(
        join_chain, shr_with_sps_aggs,
        shr_aggs.c.aggregate_id == shr_with_sps_aggs.c.aggregate_id)
    join_chain = sa.join(
        join_chain, shr_with_sps,
        shr_with_sps_aggs.c.resource_provider_id == shr_with_sps.c.id)
    if get_id:
        sel = sa.select([sps.c.id, shr_with_sps.c.root_provider_id])
    else:
        join_chain = sa.join(join_chain, rps,
                             shr_with_sps.c.root_provider_id == rps.c.id)
        sel = sa.select([sps.c.uuid, rps.c.uuid])
    sel = sel.select_from(join_chain)
    sel = sel.where(sps.c.id.in_(rp_ids))
    return set([(r[0], r[1]) for r in context.session.execute(sel).fetchall()])
Example #23
class BoardListFilterQuery(AbstractBaseFilter):
    user_alias = alias(User)
    board_alias = alias(Board)
    user_in_board_alias = alias(UserOnBoard)

    @classmethod
    def base_query(cls, user_id: int) -> Select:

        j = cls.board_alias.outerjoin(
            cls.user_in_board_alias,
            cls.board_alias.c.id == cls.user_in_board_alias.c.board_id,
        ).outerjoin(cls.user_alias,
                    cls.user_in_board_alias.c.user_id == cls.user_alias.c.id)

        query = (sa.select([
            cls.board_alias,
            cls.user_in_board_alias.c.role,
            cls.user_alias.c.uuid.label("user_uuid"),
        ]).select_from(j).where(
            cls.user_in_board_alias.c.user_id == user_id).distinct(
                cls.board_alias.c.id))
        return query

    @classmethod
    def get_my_boards_filter(cls,
                             request: Request,
                             query: Select,
                             value: bool = False) -> Select:
        if value:
            query = query.where(cls.board_alias.c.author_id == request.user.id)
        else:
            query = query.where(cls.board_alias.c.author_id != request.user.id)
        return query

    @classmethod
    def get_query_with_filters(cls, request: Request, model: Type["BaseModel"],
                               **kwargs) -> Select:
        """
        :param request:
        :param model: BaseModel model for validating data
        :param kwargs: params for base query
        :return: Query with filters
        """

        base_query = cls.base_query(**kwargs)
        filtered = cls.add_filters_from_request(request, model, base_query)
        return filtered

    my_boards = get_my_boards_filter
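A hedged wiring sketch (the request object and the BoardListParams validation model are assumptions):

query = BoardListFilterQuery.get_query_with_filters(
    request, BoardListParams, user_id=request.user.id)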
Example #24
    def generate_insert(self):
        result = []
        # first insert fields for which we have no change record
        staging_alias = catalog.aliases[self.target_alias['load']
                                        ['staging_alias']]

        # create a subselect from the transform query and add column
        # gdw_state_dts_range = all timestamp ranges for which this row is valid
        select_sql = sqlalchemy.alias(self.select_sql, 'source_transform')
        select_sql_columns = [
            select_sql.corresponding_column(c) for c in self.select_sql.c
        ]
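        # func.prioritize_ranges / func.tsrange render as same-named SQL
        # functions (SQLAlchemy's func.* emits arbitrary function calls);
        # prioritize_ranges is presumably a custom database-side function.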
        gdw_state_dts_range = func.prioritize_ranges(
            func.array_agg(
                func.tsrange(literal_column('gdw_state_start'),
                             literal_column('gdw_state_end'))).over(
                                 partition_by=[
                                     literal_column(c)
                                     for c in self.object_key_columns
                                 ],
                                 order_by=self.priority_order)).label(
                                     'gdw_state_dts_range')
        select_sql_columns.append(gdw_state_dts_range)

        result.append(staging_alias.sql_table.insert().from_select(
            self.stage_col_names, select(select_sql_columns)))

        # select all from existing dimension. look up the object key in stage and remove from
        # gdw_state_dts_range that already is calculated on stage
        select_sql = sqlalchemy.alias(self.select_sql, 'source_transform')
        select_sql_columns = [
            select_sql.corresponding_column(c) for c in self.select_sql.c
        ]
        gdw_state_dts_range = func.prioritize_ranges(
            func.array_agg(
                func.tsrange(literal_column('gdw_state_start'),
                             literal_column('gdw_state_end'))).over(
                                 partition_by=[
                                     literal_column(c)
                                     for c in self.object_key_columns
                                 ],
                                 order_by=self.priority_order)).label(
                                     'gdw_state_dts_range')
        select_sql_columns.append(gdw_state_dts_range)

        result.append(staging_alias.sql_table.insert().from_select(
            self.stage_col_names, select(select_sql_columns)))
        return result
Example #25
    def execute(self, metadata, connection, filter_values):
        try:
            table = metadata.tables[self.table_name]
        except KeyError:
            raise TableNotFoundException("Unable to query table, table not found: %s" % self.table_name)

        asha_table = self.get_asha_table(metadata)

        max_date_query = sqlalchemy.select([
            sqlalchemy.func.max(asha_table.c.date).label('date'),
            asha_table.c.case_id.label('case_id')
        ])
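        # Latest-row-per-case pattern: MAX(date) per case_id; joined back
        # against the full table below on (case_id, date).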

        if self.filters:
            for filter in self.filters:
                max_date_query.append_whereclause(filter.build_expression(table))

        max_date_query.append_group_by(
            asha_table.c.case_id
        )

        max_date_subquery = sqlalchemy.alias(max_date_query, 'max_date')

        checklist_query = sqlalchemy.select()
        for column in self.columns:
            checklist_query.append_column(column.build_column(asha_table))

        checklist_query = checklist_query.where(
            asha_table.c.case_id == max_date_subquery.c.case_id
        ).where(
            asha_table.c.date == max_date_subquery.c.date
        )

        return connection.execute(checklist_query, **filter_values).fetchall()
Example #26
def set_root_provider_ids(context, batch_size):
    """Simply sets the root_provider_id value for a provider identified by
    rp_id. Used in explicit online data migration via CLI.

    :param rp_id: Internal ID of the provider to update
    :param root_id: Value to set root provider to
    """
    # UPDATE resource_providers
    # SET root_provider_id=resource_providers.id
    # WHERE resource_providers.id
    # IN (SELECT subq_1.id
    #     FROM (SELECT resource_providers.id AS id
    #           FROM resource_providers
    #           WHERE resource_providers.root_provider_id IS NULL
    #           LIMIT :param_1)
    #     AS subq_1)

    subq_1 = context.session.query(_RP_TBL.c.id)
    subq_1 = subq_1.filter(_RP_TBL.c.root_provider_id.is_(None))
    subq_1 = subq_1.limit(batch_size)
    subq_1 = sa.alias(subq_1.as_scalar(), name="subq_1")

    subq_2 = sa.select([subq_1.c.id]).select_from(subq_1)

    upd = _RP_TBL.update().where(_RP_TBL.c.id.in_(subq_2.as_scalar()))
    upd = upd.values(root_provider_id=_RP_TBL.c.id)
    res = context.session.execute(upd)
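    # Presumably the (found, migrated) pair expected by the online-migration
    # machinery; both equal the UPDATE's rowcount here.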

    return res.rowcount, res.rowcount
Example #27
File: table.py  Project: adityaU/dataset
    def find(self, _limit=None, _offset=0, _step=5000,
             order_by='id', **_filter):
        """
        Performs a simple search on the table. Simply pass keyword arguments as ``filter``.
        ::

            results = table.find(country='France')
            results = table.find(country='France', year=1980)

        Using ``_limit``::

            # just return the first 10 rows
            results = table.find(country='France', _limit=10)

        You can sort the results by single or multiple columns. Append a minus sign
        to the column name for descending order::

            # sort results by a column 'year'
            results = table.find(country='France', order_by='year')
            # return all rows sorted by multiple columns (by year in descending order)
            results = table.find(order_by=['country', '-year'])

        By default :py:meth:`find() <dataset.Table.find>` will break the
        query into chunks of ``_step`` rows to prevent huge tables
        from being loaded into memory at once.

        For more complex queries, please use :py:meth:`db.query() <dataset.Database.query>`
        instead."""
        self._check_dropped()
        if not isinstance(order_by, (list, tuple)):
            order_by = [order_by]
        order_by = [o for o in order_by if o in self.table.columns]
        order_by = [self._args_to_order_by(o) for o in order_by]

        args = self._args_to_clause(_filter)

        # query total number of rows first
        count_query = alias(self.table.select(whereclause=args, limit=_limit, offset=_offset), name='count_query_alias').count()
        rp = self.database.executable.execute(count_query)
        total_row_count = rp.fetchone()[0]

        if _step is None or _step is False or _step == 0:
            _step = total_row_count

        if total_row_count > _step and not order_by:
            _step = total_row_count
            log.warn("query cannot be broken into smaller sections because it is unordered")

        queries = []

        for i in count():
            qoffset = _offset + (_step * i)
            qlimit = _step
            if _limit is not None:
                qlimit = min(_limit - (_step * i), _step)
            if qlimit <= 0:
                break
            queries.append(self.table.select(whereclause=args, limit=qlimit,
                                             offset=qoffset, order_by=order_by))
        return ResultIter((self.database.executable.execute(q) for q in queries))
Example #28
def compute_distance(session, table, monosaccharide_names, model=GlycopeptideMatch):
    distance_table = make_distance_table(model)
    distance_table.create(session.connection())
    from_entity = alias(table)
    to_entity = alias(table)

    distances = [getattr(from_entity.c, name) - getattr(to_entity.c, name) for name in monosaccharide_names]
    selected = [from_entity.c.id, to_entity.c.id] + distances
    q = session.query(*selected).join(to_entity, from_entity.c.id != to_entity.c.id).yield_per(1000)
    for fields in q:
        from_id, to_id = fields[:2]
        distance = sum(fields[2:])
        # print from_id, to_id, distance
        session.execute(distance_table.insert(), [{'from_id': from_id, "to_id": to_id, "distance": distance}])
    session.commit()
    return distance_table
Example #29
def _get_provider_ids_having_all_traits(ctx, required_traits):
    """Returns a set of resource provider internal IDs that have ALL of the
    required traits.

    NOTE: Don't call this method with no required_traits.

    :param ctx: Session context to use
    :param required_traits: A map, keyed by trait string name, of required
                            trait internal IDs that each provider must have
                            associated with it
    :raise ValueError: If required_traits is empty or None.
    """
    if not required_traits:
        raise ValueError('required_traits must not be empty')
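    # Roughly (table name assumed from _RP_TRAIT_TBL):
    #   SELECT rpt.resource_provider_id
    #   FROM resource_provider_traits AS rpt
    #   WHERE rpt.trait_id IN (:required_trait_ids)
    #   GROUP BY rpt.resource_provider_id
    #   HAVING COUNT(rpt.trait_id) = :num_traits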

    rptt = sa.alias(_RP_TRAIT_TBL, name="rpt")
    sel = sa.select([rptt.c.resource_provider_id])
    sel = sel.where(rptt.c.trait_id.in_(required_traits.values()))
    sel = sel.group_by(rptt.c.resource_provider_id)
    # Only get the resource providers that have ALL the required traits, so we
    # need to GROUP BY the resource provider and ensure that the
    # COUNT(trait_id) is equal to the number of traits we are requiring
    num_traits = len(required_traits)
    cond = sa.func.count(rptt.c.trait_id) == num_traits
    sel = sel.having(cond)
    return set(r[0] for r in ctx.session.execute(sel))
Example #30
  def filter_ancestors(self, and_self=False):
    "The same as :meth:`filter_descendants` but filters ancestor nodes."
    options = self._tree_options
    obj     = self._get_obj()

    #self._get_session_and_assert_flushed(obj)

    # Restrict ourselves to just those nodes within the same tree:
    tree_id = getattr(obj, self.tree_id_field.name)
    filter_ = self.tree_id_field == tree_id

    alias = sqlalchemy.alias(options.table)
    left_field = self.left_field
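    # Nested-set ancestor test: `alias` is pinned to obj's own row via the
    # primary-key match below, so a candidate row qualifies when the node's
    # left value falls between the candidate's left and right values.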
    filter_ &= sqlalchemy.between(
      getattr(alias.c, self.left_field.name),
      self.left_field, self.right_field)
    filter_ &= getattr(alias.c, self.pk_field.name) == \
               getattr(obj,     self.pk_field.name)

    if not and_self:
      filter_ &= self.pk_field != getattr(obj, self.pk_field.name)

    # WHERE tree_id = <node.tree_id> AND <node.path> LIKE path || '%'
    #filter_ = (self.tree_id_field == tree_id) \
    #          & sqlalchemy.sql.expression.literal(
    #                path, sqlalchemy.String
    #            ).like(options.path_field + '%')
    #if and_self:
    #  filter_ &= self.depth_field  <= depth
    #else:
    #  filter_ &= self.depth_field < depth
    return filter_
Example #31
    def apply_default_value(self, column):
        if column.default:
            execute = self.table.migration.conn.execute
            val = column.default.arg
            table = self.table.migration.metadata.tables[self.table.name]
            table.append_column(column)
            cname = getattr(table.c, column.name)
            if column.default.is_callable:
                table2 = alias(select([table]).limit(1).where(cname.is_(None)))
                Table = self.table.migration.metadata.tables['system_model']
                Column = self.table.migration.metadata.tables['system_column']
                j1 = join(Table, Column, Table.c.name == Column.c.model)
                query = select([func.count()]).select_from(table)
                nb_row = self.table.migration.conn.execute(query).fetchone()[0]
                query = select([Column.c.name]).select_from(j1)
                query = query.where(Column.c.primary_key.is_(True))
                query = query.where(Table.c.table == self.table.name)
                columns = [x[0] for x in execute(query).fetchall()]
                where = and_(*[getattr(table.c, x) == getattr(table2.c, x)
                               for x in columns])
                for offset in range(nb_row):
                    # call for each row because the default value
                    # could be a sequence or depend of other field
                    query = update(table).where(where).values(
                        {cname: val(None)})
                    execute(query)

            else:
                query = update(table).where(cname.is_(None)).values(
                    {cname: val})
                execute(query)
Example #33
    def execute(self, metadata, connection, filter_values):
        asha_table = self.get_asha_table(metadata)

        max_date_query = sqlalchemy.select([
            sqlalchemy.func.max(asha_table.c.date).label('date'),
            asha_table.c.case_id.label('case_id')
        ])

        if self.filters:
            for filter in self.filters:
                max_date_query.append_whereclause(filter.build_expression())

        max_date_query.append_group_by(
            asha_table.c.case_id
        )

        max_date_subquery = sqlalchemy.alias(max_date_query, 'max_date')

        checklist_query = sqlalchemy.select()
        for column in self.columns:
            checklist_query.append_column(column.build_column(asha_table))

        checklist_query = checklist_query.where(
            asha_table.c.case_id == max_date_subquery.c.case_id
        ).where(
            asha_table.c.date == max_date_subquery.c.date
        )

        return connection.execute(checklist_query, **filter_values).fetchall()
Example #34
    def execute(self, connection, filter_values):
        max_date_query = sqlalchemy.select([
            sqlalchemy.func.max(sqlalchemy.column('completed_on')).label('completed_on'),
            sqlalchemy.column('case_id').label('case_id')
        ]).select_from(sqlalchemy.table(self.table_name))

        if self.filters:
            for filter in self.filters:
                max_date_query.append_whereclause(filter.build_expression())

        max_date_query.append_group_by(
            sqlalchemy.column('case_id')
        )

        max_date_subquery = sqlalchemy.alias(max_date_query, 'max_date')

        asha_table = self.get_asha_table_name()
        checklist_query = sqlalchemy.select()
        for column in self.columns:
            checklist_query.append_column(column.build_column())

        checklist_query = checklist_query.where(
            sqlalchemy.literal_column('"{}".case_id'.format(asha_table)) == max_date_subquery.c.case_id
        ).where(
            sqlalchemy.literal_column('"{}".completed_on'.format(asha_table)) == max_date_subquery.c.completed_on
        ).select_from(sqlalchemy.table(asha_table))

        return connection.execute(checklist_query, **filter_values).fetchall()
Example #35
def _provider_ids_from_root_ids(context, root_ids):
    """Given an iterable of internal root resource provider IDs, returns a
    dict, keyed by internal provider Id, of sqla objects describing those
    providers under the given root providers.

    :param root_ids: iterable of root provider IDs for trees to look up
    :returns: dict, keyed by internal provider Id, of sqla objects with the
              following attributes:

              id: resource provider internal id
              uuid: resource provider uuid
              parent_id: internal id of the resource provider's parent
                         provider (None if there is no parent)
              root_id: internal id of the resource providers's root provider
    """
    # SELECT
    #   rp.id, rp.uuid, rp.parent_provider_id, rp.root_provider.id
    # FROM resource_providers AS rp
    # WHERE rp.root_provider_id IN ($root_ids)
    me = sa.alias(_RP_TBL, name="me")
    cols = [
        me.c.id,
        me.c.uuid,
        me.c.parent_provider_id.label('parent_id'),
        me.c.root_provider_id.label('root_id'),
    ]
    sel = sa.select(cols).where(
        me.c.root_provider_id.in_(sa.bindparam('root_ids', expanding=True)))

    ret = {}
    for r in context.session.execute(sel, {'root_ids': list(root_ids)}):
        ret[r['id']] = r
    return ret
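The `expanding=True` bind parameter above is what lets one named parameter stand in for a whole IN list at execution time. A minimal sketch of the same technique, written in the legacy 1.x Core style the snippet uses; the table definition here is an assumption for illustration, not part of the original:

import sqlalchemy as sa

metadata = sa.MetaData()
rp = sa.Table(
    'resource_providers', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('root_provider_id', sa.Integer),
)

sel = sa.select([rp.c.id, rp.c.root_provider_id]).where(
    rp.c.root_provider_id.in_(sa.bindparam('root_ids', expanding=True)))

# At execution time the single parameter expands to one placeholder per
# list element, e.g.:
#   conn.execute(sel, {'root_ids': [1, 2, 3]})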
Example #36
def test_delete_stmt_with_comma_subquery_alias_join():
    parent_ = sa.alias(product)

    del_stmt = (
        sa.delete(items)
        .where(items.c.order_id == orders.c.id)
        .where(orders.c.customer_id.in_(sa.select([customers.c.id]).where(customers.c.email.endswith("test.com"))))
        .where(items.c.product_id == product.c.id)
        .where(product.c.parent_id == parent_.c.id)
        .where(parent_.c.id != hammy_spam.c.ham_id)
    )

    expected = """
        DELETE FROM items
        USING orders, products, products AS products_1, "ham, spam"
        WHERE items.order_id = orders.id
        AND orders.customer_id IN
        (SELECT customers.id
        FROM customers
        WHERE (customers.email LIKE '%%' || 'test.com'))
        AND items.product_id = products.id
        AND products.parent_id = products_1.id
        AND products_1.id != "ham, spam".ham_id"""

    assert clean(compile_query(del_stmt)) == clean(expected)
Example #37
    def qmonosaccharide(cls, monosaccharide_name):
        if monosaccharide_name in cls._qmonosaccharide_cache:
            return cls._qmonosaccharide_cache[monosaccharide_name]
        symbol = alias(cls.GlycanCompositionAssociation.__table__.select().where(
            cls.GlycanCompositionAssociation.__table__.c.base_type == monosaccharide_name),
            monosaccharide_name)
        cls._qmonosaccharide_cache[monosaccharide_name] = symbol
        return symbol
Example #38
def test_delete_stmt_on_alias():
    parent_ = sa.alias(product)
    del_stmt = sa.delete(product).where(product.c.parent_id == parent_.c.id)
    expected = """
        DELETE FROM products
        USING products AS products_1
        WHERE products.parent_id = products_1.id"""
    assert clean(compile_query(del_stmt)) == clean(expected)
Example #39
    def _weighted_neutral_mass_query(self, session):
        anode = alias(ChromatogramTreeNode.__table__)
        bnode = alias(ChromatogramTreeNode.__table__)
        apeak = alias(DeconvolutedPeak.__table__)

        peak_join = apeak.join(
            ChromatogramTreeNodeToDeconvolutedPeak,
            ChromatogramTreeNodeToDeconvolutedPeak.c.peak_id == apeak.c.id)

        root_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id).join(
            ChromatogramToChromatogramTreeNode,
            ChromatogramToChromatogramTreeNode.c.node_id == anode.c.id)

        branch_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id).join(
            ChromatogramTreeNodeBranch,
            ChromatogramTreeNodeBranch.child_id == anode.c.id).join(
            bnode, ChromatogramTreeNodeBranch.parent_id == bnode.c.id).join(
            ChromatogramToChromatogramTreeNode,
            ChromatogramToChromatogramTreeNode.c.node_id == bnode.c.id)

        branch_intensities = select([apeak.c.intensity, apeak.c.neutral_mass, anode.c.node_type_id]).where(
            ChromatogramToChromatogramTreeNode.c.chromatogram_id == self.id
        ).select_from(branch_peaks_join)

        root_intensities = select([apeak.c.intensity, apeak.c.neutral_mass, anode.c.node_type_id]).where(
            ChromatogramToChromatogramTreeNode.c.chromatogram_id == self.id
        ).select_from(root_peaks_join)

        all_intensity_mass_q = root_intensities.union_all(branch_intensities)

        all_intensity_mass = session.execute(all_intensity_mass_q).fetchall()

        arr = np.array(all_intensity_mass)
        mass = arr[:, 1]
        shift_ids = arr[:, 2].astype(int)
        distinct_shifts = set(shift_ids)
        for i in distinct_shifts:
            shift = session.query(CompoundMassShift).get(i)
            mass[shift_ids == i] -= shift.convert().mass
        intensity = arr[:, 0]
        return mass.dot(intensity) / intensity.sum()
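The return value is an intensity-weighted mean of the shift-corrected neutral masses; a tiny numeric sketch with assumed values:

import numpy as np

mass = np.array([100.0, 200.0])
intensity = np.array([1.0, 3.0])

# Weighted mean: sum(mass * intensity) / sum(intensity) = 175.0
assert mass.dot(intensity) / intensity.sum() == 175.0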
Example #40
    def _build_median_query(self, median_id_table, median_table):
        """
        SELECT tup.user_name, (tup.value + tlo.value) / 2.0 AS value
        FROM temp_median_ids, temp_median AS tup, temp_median AS tlo
        WHERE temp_median_ids.upper = tup.id
          AND temp_median_ids.lower = tlo.id;
        """
        t_upper = alias(median_table, name="tup")
        t_lower = alias(median_table, name="tlo")

        final_query = select(from_obj=median_id_table)
        for group in self.group_by:
            final_query.append_column(t_upper.c[group])

        final_query.append_column(((t_upper.c[self.VAL_COL] + t_lower.c[self.VAL_COL]) / 2.0).label(self.alias))
        final_query.append_whereclause(median_id_table.c["upper"] == t_upper.c[self.ID_COL])
        final_query.append_whereclause(median_id_table.c["lower"] == t_lower.c[self.ID_COL])
        return final_query
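The join above encodes the usual even/odd median rule: with the ids of the two middle rows precomputed per group (they coincide for odd row counts), the median is simply their mean. A plain-Python sketch with assumed data:

values = {1: 10.0, 2: 12.0, 3: 20.0, 4: 31.0}  # temp_median: id -> value
upper, lower = 3, 2                            # temp_median_ids row for one group

assert (values[upper] + values[lower]) / 2.0 == 16.0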
Example #41
def test_date(session):
    dates = (
        date(2016, 1, 1),
        date(2016, 1, 2),
    )
    selects = tuple(sa.select((MakeADate(d),)) for d in dates)
    data = sa.alias(sa.union(*selects, use_labels=True), 'dates')
    stmt = sa.select((data,))
    result = session.execute(stmt).fetchall()
    assert tuple(chain.from_iterable(result)) == dates
Example #42
def get_end_tx_column_query(
    table,
    end_tx_column_name='end_transaction_id',
    tx_column_name='transaction_id'
):

    v1 = sa.alias(table, name='v')
    v2 = sa.alias(table, name='v2')
    v3 = sa.alias(table, name='v3')

    primary_keys = [c.name for c in table.c if c.primary_key]

    tx_criterion = sa.select(
        [sa.func.min(getattr(v3.c, tx_column_name))]
    ).where(
        sa.and_(
            getattr(v3.c, tx_column_name) > getattr(v1.c, tx_column_name),
            *[
                getattr(v3.c, pk) == getattr(v1.c, pk)
                for pk in primary_keys
                if pk != tx_column_name
            ]
        )
    )
    return sa.select(
        columns=[
            getattr(v1.c, column)
            for column in primary_keys
        ] + [
            getattr(v2.c, tx_column_name).label(end_tx_column_name)
        ],
        from_obj=v1.outerjoin(
            v2,
            getattr(v2.c, tx_column_name) == tx_criterion
        )
    ).order_by(getattr(v1.c, tx_column_name))
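What the query computes, shown on assumed data: each version row's end transaction id is the smallest later transaction_id for the same primary key, or NULL for the still-current version.

rows = [(1, 5), (1, 9), (2, 7)]  # (article_id, transaction_id)

def end_tx(article_id, tx):
    later = [t for (a, t) in rows if a == article_id and t > tx]
    return min(later) if later else None  # None models SQL NULL

assert end_tx(1, 5) == 9      # superseded by the tx=9 version
assert end_tx(1, 9) is None   # still the current version
assert end_tx(2, 7) is None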
Example #43
File: query.py Project: her0e1c1/pystock
def latest_prices_by_codes(codes=[]):
    p1 = models.Price
    p2 = sql.alias(models.Price)
    with models.session_scope() as s:
        query = s.query(p1).outerjoin(p2, sql.and_(
            p1.quandl_code == p2.c.quandl_code,
            p1.date < p2.c.date,
        )).filter(
            p1.quandl_code.in_(codes) if codes else True,
            p2.c.date.is_(None),
        )
        df = pd.read_sql(query.statement, query.session.bind, index_col="quandl_code")
    return df
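The outer-join-plus-IS-NULL filter above is the classic greatest-row-per-group anti-join: a price row is the latest for its code exactly when no row with the same code has a later date. The same logic on plain tuples, with assumed data:

prices = [("A", 1), ("A", 2), ("B", 1)]  # (quandl_code, date)

latest = [
    (code, d) for (code, d) in prices
    if not any(c == code and d2 > d for (c, d2) in prices)
]
assert latest == [("A", 2), ("B", 1)]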
Example #44
def _get_consumer_by_uuid(ctx, uuid):
    # The SQL for this looks like the following:
    # SELECT
    #   c.id, c.uuid,
    #   p.id AS project_id, p.external_id AS project_external_id,
    #   u.id AS user_id, u.external_id AS user_external_id,
    #   c.updated_at, c.created_at
    # FROM consumers c
    # INNER JOIN projects p
    #  ON c.project_id = p.id
    # INNER JOIN users u
    #  ON c.user_id = u.id
    # WHERE c.uuid = $uuid
    consumers = sa.alias(CONSUMER_TBL, name="c")
    projects = sa.alias(project_obj.PROJECT_TBL, name="p")
    users = sa.alias(user_obj.USER_TBL, name="u")
    cols = [
        consumers.c.id,
        consumers.c.uuid,
        projects.c.id.label("project_id"),
        projects.c.external_id.label("project_external_id"),
        users.c.id.label("user_id"),
        users.c.external_id.label("user_external_id"),
        consumers.c.generation,
        consumers.c.updated_at,
        consumers.c.created_at
    ]
    c_to_p_join = sa.join(
        consumers, projects, consumers.c.project_id == projects.c.id)
    c_to_u_join = sa.join(
        c_to_p_join, users, consumers.c.user_id == users.c.id)
    sel = sa.select(cols).select_from(c_to_u_join)
    sel = sel.where(consumers.c.uuid == uuid)
    res = ctx.session.execute(sel).fetchone()
    if not res:
        raise exception.ConsumerNotFound(uuid=uuid)

    return dict(res)
Example #45
    def _build_query(self, table, filter_values):
        key_column = table.c[self.key]
        sum_query = sqlalchemy.alias(
            sqlalchemy.select(
                self.group_by + [sqlalchemy.func.sum(key_column).label('sum_col')] + [table.c.month],
                group_by=self.group_by + [table.c.month],
                whereclause=self.filter.build_expression(table),
            ), name='s')

        return select(
            self.group_by + [sqlalchemy.func.avg(sum_query.c.sum_col).label(self.key)],
            group_by=self.group_by,
            from_obj=sum_query
        ).params(filter_values)
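The nesting above computes, per group, the average of per-month sums; a worked sketch with assumed data:

monthly = {"2016-01": [2, 3], "2016-02": [5]}  # key-column values by month

sums = [sum(vals) for vals in monthly.values()]  # [5, 5]
assert sum(sums) / len(sums) == 5.0              # AVG of the monthly SUMs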
Example #46
File: table.py Project: madprog/dataset
    def find(self, _limit=None, _offset=0, _step=5000,
             order_by='id', return_count=False, **_filter):
        """
        Performs a simple search on the table. Simply pass keyword arguments as ``filter``.
        ::

            results = table.find(country='France')
            results = table.find(country='France', year=1980)

        Using ``_limit``::

            # just return the first 10 rows
            results = table.find(country='France', _limit=10)

        You can sort the results by single or multiple columns. Append a minus sign
        to the column name for descending order::

            # sort results by a column 'year'
            results = table.find(country='France', order_by='year')
            # return all rows sorted by multiple columns (by year in descending order)
            results = table.find(order_by=['country', '-year'])

        For more complex queries, please use :py:meth:`db.query() <dataset.Database.query>`
        instead."""
        self._check_dropped()
        if not isinstance(order_by, (list, tuple)):
            order_by = [order_by]
        order_by = [o for o in order_by if (o.startswith('-') and o[1:] or o) in self.table.columns]
        order_by = [self._args_to_order_by(o) for o in order_by]

        args = self._args_to_clause(_filter)

        # query total number of rows first
        count_query = alias(self.table.select(whereclause=args, limit=_limit, offset=_offset),
                            name='count_query_alias').count()
        rp = self.database.executable.execute(count_query)
        total_row_count = rp.fetchone()[0]
        if return_count:
            return total_row_count

        if _limit is None:
            _limit = total_row_count

        if _step is None or _step is False or _step == 0:
            _step = total_row_count

        query = self.table.select(whereclause=args, limit=_limit,
                                  offset=_offset, order_by=order_by)
        return ResultIter(self.database.executable.execute(query),
                          row_type=self.database.row_type, step=_step)
Example #47
File: project.py Project: arbrandes/nova
def _get_project_by_external_id(ctx, external_id):
    projects = sa.alias(PROJECT_TBL, name="p")
    cols = [
        projects.c.id,
        projects.c.external_id,
        projects.c.updated_at,
        projects.c.created_at
    ]
    sel = sa.select(cols)
    sel = sel.where(projects.c.external_id == external_id)
    res = ctx.session.execute(sel).fetchone()
    if not res:
        raise exception.ProjectNotFound(external_id=external_id)

    return dict(res)
Example #48
    def _mass_shifts_query_inner(self, session):
        anode = alias(ChromatogramTreeNode.__table__)
        bnode = alias(ChromatogramTreeNode.__table__)
        apeak = alias(DeconvolutedPeak.__table__)

        peak_join = apeak.join(
            ChromatogramTreeNodeToDeconvolutedPeak,
            ChromatogramTreeNodeToDeconvolutedPeak.c.peak_id == apeak.c.id)

        root_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id).join(
            ChromatogramToChromatogramTreeNode,
            ChromatogramToChromatogramTreeNode.c.node_id == anode.c.id)

        branch_peaks_join = peak_join.join(
            anode,
            ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == anode.c.id).join(
            ChromatogramTreeNodeBranch,
            ChromatogramTreeNodeBranch.child_id == anode.c.id).join(
            bnode, ChromatogramTreeNodeBranch.parent_id == bnode.c.id).join(
            ChromatogramToChromatogramTreeNode,
            ChromatogramToChromatogramTreeNode.c.node_id == bnode.c.id)
        return root_peaks_join, branch_peaks_join, anode, bnode, apeak
Example #49
File: user.py Project: arbrandes/nova
def _get_user_by_external_id(ctx, external_id):
    users = sa.alias(USER_TBL, name="u")
    cols = [
        users.c.id,
        users.c.external_id,
        users.c.updated_at,
        users.c.created_at
    ]
    sel = sa.select(cols)
    sel = sel.where(users.c.external_id == external_id)
    res = ctx.session.execute(sel).fetchone()
    if not res:
        raise exception.UserNotFound(external_id=external_id)

    return dict(res)
Example #50
    def _build_query(self, table, filter_values):
        key_column = table.c[self.key]
        subquery = sqlalchemy.alias(
            sqlalchemy.select(
                self.group_by + [sqlalchemy.func.count(sqlalchemy.distinct(key_column)).label('count_unique')],
                group_by=self.group_by + [table.c.month],
                whereclause=self.filter.build_expression(table),
            ),
            name='cq')

        return sqlalchemy.select(
            self.group_by + [sqlalchemy.func.sum(subquery.c.count_unique).label(self.key)],
            group_by=self.group_by,
            from_obj=subquery
        ).params(filter_values)
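Here the inner query counts distinct key values within each month and the outer query sums those monthly counts per group, so a value seen in two months is counted twice. With assumed data:

monthly = {"2016-01": ["a", "a", "b"], "2016-02": ["b"]}

count_unique = [len(set(vals)) for vals in monthly.values()]  # [2, 1]
assert sum(count_unique) == 3  # "b" counts once per month it appears in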
Example #51
File: query.py Project: her0e1c1/pystock
def query_prices_by_codes(codes=[]):
    p1 = models.Price
    p2 = sql.alias(models.Price)
    with models.session_scope(expire_on_commit=False) as s:
        query = s.query(p1).join(p2, sql.and_(
            p1.quandl_code == p2.c.quandl_code,
            p1.date <= p2.c.date,
        )).filter(
            p1.quandl_code.in_(codes) if codes else True,
        ).group_by(
            p1.quandl_code,
            p1.date,
        ).having(
            sql.func.count("*") <= 2
        )
        # .order_by(models.Price.quandl_code, models.Price.date)
        return itertools.groupby(query.all(), key=lambda p: p.quandl_code)
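The counting self-join with HAVING count(*) <= 2 keeps a row when at most two rows of the same code (itself included) have a date on or after its own, i.e. the two most recent prices per code. The same test on plain tuples, with assumed data:

prices = [("A", 1), ("A", 2), ("A", 3), ("B", 1)]  # (quandl_code, date)

recent_two = [
    (c, d) for (c, d) in prices
    if sum(1 for (c2, d2) in prices if c2 == c and d2 >= d) <= 2
]
assert recent_two == [("A", 2), ("A", 3), ("B", 1)]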
Example #52
File: task.py Project: eoyilmaz/anima
    def fill(self):
        """fills the tree view with data
        """
        logger.debug('start filling tasks_treeView')
        logger.debug('creating a new model')
        if not self.project:
            from sqlalchemy import alias
            from stalker import Task, Project
            from stalker.db.session import DBSession
            # projects = Project.query.order_by(Project.name).all()
            inner_tasks = alias(Task.__table__)
            subquery = DBSession.query(inner_tasks.c.id).filter(
                inner_tasks.c.project_id == Project.id)
            query = DBSession\
                .query(
                    Project.id, Project.name, Project.entity_type,
                    Project.status_id,
                    subquery.exists().label('has_children')
                )
            if not self.show_completed_projects:
                from stalker import Status
                status_cmpl = \
                    Status.query.filter(Status.code == 'CMPL').first()
                query = query.filter(Project.status != status_cmpl)

            query = query.order_by(Project.name)
            projects = query.all()
        else:
            self.project.has_children = bool(self.project.tasks)
            projects = [self.project]

        logger.debug('projects: %s' % projects)

        # delete the old model if any
        if self.model() is not None:
            self.model().deleteLater()

        task_tree_model = TaskTreeModel()
        task_tree_model.populateTree(projects)
        self.setModel(task_tree_model)
        self.is_updating = False

        self.auto_fit_column()

        logger.debug('finished filling tasks_treeView')
Example #53
    def _filter_ancestors_of_node_helper(node):
      # Restrict ourselves to just those nodes within the same tree as node:
      tree_id = getattr(node, self.tree_id_field.name)
      filter_ = self.tree_id_field == tree_id

      # Restrict to ancestors, inclusive of node:
      alias = sqlalchemy.alias(options.table)
      left_field = self.left_field
      filter_ &= sqlalchemy.between(
        getattr(alias.c, left_field.name),
        left_field, self.right_field)
      filter_ &= getattr(alias.c, self.pk_field.name) == \
                 getattr(node,    self.pk_field.name)

      # Explicitly exclude node, if requested:
      if not include_self:
        filter_ &= self.pk_field != getattr(node, self.pk_field.name)

      # We're done!
      return filter_
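The BETWEEN above is the nested-set containment test: a row is an ancestor of node when the node's left boundary falls inside the row's (left, right) interval. A sketch with assumed lft/rgt values:

tree = {  # name -> (lft, rgt)
    "root": (1, 6),
    "child": (2, 5),
    "leaf": (3, 4),
}
node_lft, _ = tree["leaf"]

ancestors = [n for n, (l, r) in tree.items() if l <= node_lft <= r]
assert set(ancestors) == {"root", "child", "leaf"}  # inclusive of the node itself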