def filter_single_by_time(cls, type, objects, year=None, week_number=None, day=None):

        assert (week_number and year) or day
        start_date, end_date = None, None

        if year and week_number:
            start_date, end_date = get_start_and_end_date_from_week_and_year(
                year,
                week_number
            )
        if day:
            start_date = day
            end_date = day

        objects = objects.filter(
            or_(
                tuple_(
                    cast(type.start_time, Date), cast(type.end_time, Date)
                ).op('overlaps')(
                    tuple_(
                        start_date, end_date
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    cast(type.end_time, Date) == start_date,
                    # Second range ends on the start date of the first
                    end_date == cast(type.start_time, Date)
                )
            )
        )

        return objects
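
The extra or_ clauses compensate for PostgreSQL's OVERLAPS semantics: ranges are treated as half-open, so two ranges that merely touch at an endpoint do not count as overlapping. A minimal sketch of the edge case (hypothetical connection string, PostgreSQL assumed):

from sqlalchemy import create_engine, text

engine = create_engine("postgresql:///example")  # hypothetical DSN
with engine.connect() as conn:
    touching = conn.execute(text(
        "SELECT (DATE '2020-01-01', DATE '2020-01-05') OVERLAPS "
        "(DATE '2020-01-05', DATE '2020-01-09')"
    )).scalar()
    # touching is False: the ranges share only the endpoint 2020-01-05,
    # which is exactly the case the equality comparisons above handle.
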
Example no. 2
    def get_repeating(self, resource, except_applications, start_date, end_date, statuses):
        query = current_app.db_session.query(RepeatingSlot)
        objects = query.filter(Application.resource == resource,
                               RepeatingSlot.application_id == Application.id,
                               Application.status.in_(statuses))

        if except_applications:
            objects = objects.filter(
                ~RepeatingSlot.application_id.in_(except_applications)
            )

        objects = objects.filter(
            or_(
                tuple_(
                    RepeatingSlot.start_date, RepeatingSlot.end_date
                ).op('overlaps')(
                    tuple_(
                        cast(start_date, Date), cast(end_date, Date)
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    RepeatingSlot.end_date == cast(start_date, Date),
                    # Second range ends on the start date of the first
                    cast(end_date, Date) == RepeatingSlot.start_date
                )
            )
        )

        return objects.all()
Example no. 3
 def populate_cache(self, stubs):
   """Fetch all mappings for objects in stubs, cache them in self.cache."""
   # Union is here to convince MySQL to use two separate indices and
   # merge the results; just using `or` results in a full-table scan.
   # The manual column list avoids loading the full object, which would
   # also try to load related objects.
   cols = db.session.query(
       Relationship.source_type, Relationship.source_id,
       Relationship.destination_type, Relationship.destination_id)
   relationships = cols.filter(
       sa.tuple_(
           Relationship.source_type,
           Relationship.source_id
       ).in_(
           [(s.type, s.id) for s in stubs]
       )
   ).union_all(
       cols.filter(
           sa.tuple_(
               Relationship.destination_type,
               Relationship.destination_id
           ).in_(
               [(s.type, s.id) for s in stubs]
           )
       )
   ).all()
   for (src_type, src_id, dst_type, dst_id) in relationships:
     src = Stub(src_type, src_id)
     dst = Stub(dst_type, dst_id)
      # Only store a neighbor if we queried for it; this way we know we'll
      # have stored the complete neighborhood by the end of the loop.
     if src in stubs:
       self.cache[src].add(dst)
     if dst in stubs:
       self.cache[dst].add(src)
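
For contrast, the single-query form the comment above warns against would look like this sketch (same models and stubs as above); with one or_ across two different column pairs, MySQL falls back to the full-table scan the comment mentions:

pairs = [(s.type, s.id) for s in stubs]
relationships = cols.filter(
    sa.or_(
        sa.tuple_(Relationship.source_type, Relationship.source_id).in_(pairs),
        sa.tuple_(Relationship.destination_type,
                  Relationship.destination_id).in_(pairs),
    )
).all()
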
Example no. 4
  def _get_revision_type_query(model, permission_type):
    """Filter model based on availability of related objects.

    This method is used only when quering revisions. In such case only
    revisions of objects user has right permission on should be returned. It
    means, user must have either right permission on object revision belongs
    to or in case it is revision of a relationship, user must have right
    permission on at least one part of the relationship.
    """
    allowed_resources = permissions.all_resources(permission_type)
    if not allowed_resources:
      return sa.false()

    return sa.or_(
        sa.tuple_(
            model.resource_type,
            model.resource_id,
        ).in_(
            allowed_resources,
        ),
        sa.tuple_(
            model.source_type,
            model.source_id,
        ).in_(
            allowed_resources,
        ),
        sa.tuple_(
            model.destination_type,
            model.destination_id,
        ).in_(
            allowed_resources,
        ),
    )
Example no. 5
    def test_tuple_containment(self):

        for test, exp in [
            ([('a', 'b')], True),
            ([('a', 'c')], False),
            ([('f', 'q'), ('a', 'b')], True),
            ([('f', 'q'), ('a', 'c')], False)
        ]:
            eq_(
                testing.db.execute(
                    select([
                        tuple_(
                            literal_column("'a'"),
                            literal_column("'b'")
                        ).
                        in_([
                            tuple_(*[
                                literal_column("'%s'" % letter)
                                for letter in elem
                            ]) for elem in test
                        ])
                    ])
                ).scalar(),
                exp
            )
Example no. 6
File: utils.py Project: 18F/openFEC
    def _fetch(self, last_index, sort_index=None, limit=None, eager=True):
        cursor = self.cursor
        direction = self.sort_column[1] if self.sort_column else sa.asc
        lhs, rhs = (), ()

        if sort_index is not None:
            left_index = self.sort_column[0]

            # Check if we're using a sort expression and if so, use the type
            # associated with it instead of deriving it from the column.
            if not self.sort_column[3]:
                comparator = self.max_column_map.get(
                    str(left_index.property.columns[0].type).lower()
                )
            else:
                comparator = self.max_column_map.get(self.sort_column[5])

            left_index = sa.func.coalesce(left_index, comparator)
            lhs += (left_index,)
            rhs += (sort_index,)

        if last_index is not None:
            lhs += (self.index_column,)
            rhs += (last_index,)

        lhs = sa.tuple_(*lhs)
        rhs = sa.tuple_(*rhs)

        if rhs.clauses:
            filter = lhs > rhs if direction == sa.asc else lhs < rhs
            cursor = cursor.filter(filter)

        query = cursor.order_by(direction(self.index_column)).limit(limit)
        return query.all() if eager else query
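
The lhs > rhs tuple comparison is the keyset ("seek method") pagination idiom: SQLAlchemy renders it as a row-value comparison, so the next page starts strictly after the last (sort value, index value) pair seen. A stripped-down sketch under assumed names (hypothetical Article model; requires a backend with row-value comparison support, e.g. PostgreSQL):

import sqlalchemy as sa

def next_page(session, Article, last_title, last_id, per_page=20):
    # Rows that sort strictly after the last (title, id) pair already seen.
    return (
        session.query(Article)
        .filter(sa.tuple_(Article.title, Article.id) >
                sa.tuple_(last_title, last_id))
        .order_by(Article.title, Article.id)
        .limit(per_page)
        .all()
    )
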
Example no. 7
 def get_repeating_slots(cls, resource, start_date, end_date, week_day, start_time, end_time):
     query = current_app.db_session.query(RepeatingSlot)
     statuses = ["Granted"]
     objects = query.filter(
         Application.resource == resource,
         RepeatingSlot.application_id == Application.id,
         Application.status.in_(statuses),
     )
     objects = objects.filter(
         or_(
             tuple_(RepeatingSlot.start_date, RepeatingSlot.end_date).op("overlaps")(
                 tuple_(cast(start_date, Date), cast(end_date, Date))
             ),
             or_(
                 # First range ends on the start date of the second
                 RepeatingSlot.end_date == cast(start_date, Date),
                 # Second range ends on the start date of the first
                 cast(end_date, Date) == RepeatingSlot.start_date,
             ),
         ),
         tuple_(RepeatingSlot.start_time, RepeatingSlot.end_time).op("overlaps")(
             tuple_(cast(start_time, Time), cast(end_time, Time))
         ),
         and_(week_day == RepeatingSlot.week_day),
     )
     return objects.all()
Example no. 8
    def get_arrangements_slots(self, resource, start_date, end_date):  # start_time, end_time, week_day
        query = current_app.db_session.query(Slot)
        statuses = ["Granted"]
        objects = query.filter(Application.resource == resource,
                               Slot.application_id == Application.id,
                               Application.status.in_(statuses),
                               Application.is_arrangement == True)

        objects = objects.filter(
            or_(
                tuple_(
                    cast(Slot.start_time, Date), cast(Slot.end_time, Date)
                ).op('overlaps')(
                    tuple_(
                        cast(start_date, Date), cast(end_date, Date)
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    cast(Slot.end_time, Date) == cast(start_date, Date),
                    # Second range ends on the start date of the first
                    cast(end_date, Date) == cast(Slot.start_time, Date)
                )
            )
        )
        return objects.order_by(Slot.start_time).all()
Example no. 9
    def test_tuple_containment(self):

        for test, exp in [
            ([("a", "b")], True),
            ([("a", "c")], False),
            ([("f", "q"), ("a", "b")], True),
            ([("f", "q"), ("a", "c")], False),
        ]:
            eq_(
                testing.db.execute(
                    select(
                        [
                            tuple_(
                                literal_column("'a'"), literal_column("'b'")
                            ).in_(
                                [
                                    tuple_(
                                        *[
                                            literal_column("'%s'" % letter)
                                            for letter in elem
                                        ]
                                    )
                                    for elem in test
                                ]
                            )
                        ]
                    )
                ).scalar(),
                exp,
            )
Example no. 10
    def get_requested_slots(self, resource, except_applications, start_date, end_date):
        query = current_app.db_session.query(RepeatingSlotRequest)

        objects = query.filter(
            RepeatingSlotRequest.application.has(resource_id=resource.id)
        )

        objects = objects.filter(
            RepeatingSlotRequest.application.has(status="Pending")
        )

        if except_applications:
            objects = objects.filter(
                ~RepeatingSlotRequest.application_id.in_(except_applications)
            )

        objects = objects.filter(
            or_(
                tuple_(
                    RepeatingSlotRequest.start_date, RepeatingSlotRequest.end_date
                ).op('overlaps')(
                    tuple_(
                        cast(start_date, Date), cast(end_date, Date)
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    RepeatingSlotRequest.end_date == cast(start_date, Date),
                    # Second range ends on the start date of the first
                    cast(end_date, Date) == RepeatingSlotRequest.start_date
                )
            )
        )

        return objects.all()
Example no. 11
    def filter_repeating_by_time(cls, type, objects, year=None, week_number=None, day=None):

        assert (week_number and year) or day
        start_date, end_date = None, None

        if year and week_number:
            start_date, end_date = get_start_and_end_date_from_week_and_year(
                year,
                week_number
            )
        if day:
            start_date = day
            end_date = day
            objects = objects.filter(
                type.week_day == day.isoweekday()
            )

        objects = objects.filter(
            or_(
                tuple_(
                    type.start_date, type.end_date
                ).op('overlaps')(
                    tuple_(
                        start_date, end_date
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    type.end_date == start_date,
                    # Second range ends on the start date of the first
                    end_date == type.start_date
                )
            )
        )
        return objects
Example no. 12
def get_all_orders(email=None):
    session = Db.instance().session
    # Create subquery for retrieving the last state of each order
    subquery = session.query(
        Order_state.order_id, func.max(Order_state.date)
    ).group_by(Order_state.order_id).subquery()
    if email is not None:
        return Order().queryObject().join(Order_state).filter(and_(
            tuple_(Order_state.order_id, Order_state.date).in_(subquery),
            Order.user.has(email=email)
        )).order_by(Order_state.state, desc(Order_state.date)).all()
    else:
        return Order().queryObject().join(Order_state).filter(
            tuple_(Order_state.order_id, Order_state.date).in_(subquery)
        ).order_by(Order_state.state, desc(Order_state.date)).all()
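
The (order_id, max(date)) subquery combined with the tuple IN is the classic greatest-n-per-group pattern: the subquery pins down each group's newest row, and the tuple membership test selects exactly those rows. The same pattern in isolation, under a hypothetical Message model and session:

from sqlalchemy import func, tuple_

latest = (
    session.query(Message.conversation_id, func.max(Message.created_at))
    .group_by(Message.conversation_id)
    .subquery()
)
# Only each conversation's newest message survives the tuple membership test.
newest = (
    session.query(Message)
    .filter(tuple_(Message.conversation_id, Message.created_at).in_(latest))
    .all()
)
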
Example no. 13
  def _remove_lost_snapshot_mappings(self):
    """Remove mappings between snapshots if base objects were unmapped."""
    source_snap = sa.orm.aliased(all_models.Snapshot, name="source_snap")
    dest_snap = sa.orm.aliased(all_models.Snapshot, name="dest_snap")
    source_rel = sa.orm.aliased(all_models.Relationship, name="source_rel")
    dest_rel = sa.orm.aliased(all_models.Relationship, name="dest_rel")

    parents = {(p.type, p.id) for p in self.parents}
    lost_rel_ids = db.session.query(all_models.Relationship.id).join(
        source_snap, source_snap.id == all_models.Relationship.source_id
    ).join(
        dest_snap, dest_snap.id == all_models.Relationship.destination_id
    ).outerjoin(
        source_rel,
        sa.and_(
            source_rel.source_type == source_snap.child_type,
            source_rel.source_id == source_snap.child_id,
            source_rel.destination_type == dest_snap.child_type,
            source_rel.destination_id == dest_snap.child_id,
        )
    ).outerjoin(
        dest_rel,
        sa.and_(
            dest_rel.destination_type == source_snap.child_type,
            dest_rel.destination_id == source_snap.child_id,
            dest_rel.source_type == dest_snap.child_type,
            dest_rel.source_id == dest_snap.child_id,
        )
    ).filter(
        all_models.Relationship.source_type == 'Snapshot',
        all_models.Relationship.destination_type == 'Snapshot',
        source_rel.id.is_(None),
        dest_rel.id.is_(None),
        sa.tuple_(
            source_snap.parent_type,
            source_snap.parent_id,
        ).in_(parents),
        sa.tuple_(
            dest_snap.parent_type,
            dest_snap.parent_id,
        ).in_(parents)
    )

    lost_rels = all_models.Relationship.query.filter(
        all_models.Relationship.id.in_(lost_rel_ids)
    )
    for rel in lost_rels:
      db.session.delete(rel)
Example no. 14
def _get_acl_filter():
  """Get filter for acl entries.

  This creates a filter to select only acl entries for objects that were
  specified in the request json.

  If this filter is used we must not store the results of the permissions dict
  into memcache.

  Returns:
    list of filter statements.
  """
  stubs = getattr(flask.g, "referenced_object_stubs", {})
  if not stubs:
    return []
  roleable_models = {m.__name__ for m in all_models.all_models
                     if issubclass(m, Roleable)}
  keys = [(type_, id_)
          for type_, ids in stubs.iteritems()
          for id_ in ids
          if type_ in roleable_models]
  if not keys:
    return []
  return [
      sa.tuple_(
          all_models.AccessControlList.object_type,
          all_models.AccessControlList.object_id,
      ).in_(
          keys,
      )
  ]
Example no. 15
  def setUp(self):
    super(TestIssueTrackerIntegrationPeople, self).setUp()
    self.generator = generator.ObjectGenerator()

    factories.AccessControlRoleFactory(
        name='Custom Role',
        internal=False,
        object_type='Assessment',
    )

    # fetch all roles mentioned in self.EMAILS
    self.roles = {
        role.name: role
        for role in all_models.AccessControlRole.query.filter(
            sa.tuple_(
                all_models.AccessControlRole.object_type,
                all_models.AccessControlRole.name,
            ).in_(
                self.EMAILS.keys(),
            ),
        )
    }

    with factories.single_commit():
      self.audit = factories.AuditFactory()

      self.people = {
          role_name: [factories.PersonFactory(email=email)
                      for email in emails]
          for (_, role_name), emails in self.EMAILS.iteritems()
      }
Example no. 16
 def objects_unioning_dates(cls, objects, start_date, end_date):
     return objects.filter(or_(
         tuple_(
             cls.t.start_date, cls.t.end_date
         ).op('overlaps')(
             tuple_(
                 start_date, end_date
             )
         ),
         or_(
             # First range ends on the start date of the second
             cls.t.end_date == start_date,
             # Second range ends on the start date of the first
             end_date == cls.t.start_date
         )
     ))
Example no. 17
def get_query_for_ids(modelquery, model, ids):
    """
        Return a query object filtered by primary key values passed in the `ids` argument.

        Unfortunately, it is not possible to use the `in_` filter if the model has more
        than one primary key.
    """
    if has_multiple_pks(model):
        # Decode keys to tuples
        decoded_ids = [iterdecode(v) for v in ids]

        # Get model primary key property references
        model_pk = [getattr(model, name) for name in get_primary_key(model)]

        try:
            query = modelquery.filter(tuple_(*model_pk).in_(decoded_ids))
            # Only the execution of the query will tell us, if the tuple_
            # operator really works
            query.all()
        except DBAPIError:
            query = modelquery.filter(tuple_operator_in(model_pk, decoded_ids))
    else:
        model_pk = getattr(model, get_primary_key(model))
        query = modelquery.filter(model_pk.in_(ids))

    return query
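
The tuple_operator_in fallback itself is not shown on this page; a minimal sketch of the OR/AND expansion it performs (implementation assumed, names follow the call above):

from sqlalchemy import and_, or_, false

def tuple_operator_in(model_pk, ids):
    # Expand (pk1, pk2) IN ((a, b), ...) into
    # (pk1 = a AND pk2 = b) OR ... for backends such as SQLite.
    clauses = [
        and_(*(col == value for col, value in zip(model_pk, pk)))
        for pk in ids
    ]
    return or_(*clauses) if clauses else false()
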
Example no. 18
 def _key_conditions(self, keys):
     vals = []
     for key in keys:
         row = self._key_orm.to_row(key)
         val = tuple(row[c] for c in self._key_cols)
         vals.append(val)
     return tuple_(*self._key_cols).in_(vals)
Example no. 19
    def test_expanding_in_composite(self):
        testing.db.execute(
            users.insert(),
            [
                dict(user_id=7, user_name='jack'),
                dict(user_id=8, user_name='fred'),
                dict(user_id=9, user_name=None)
            ]
        )

        with testing.db.connect() as conn:
            stmt = select([users]).where(
                tuple_(
                    users.c.user_id,
                    users.c.user_name
                ).in_(bindparam('uname', expanding=True))
            ).order_by(users.c.user_id)

            eq_(
                conn.execute(stmt, {"uname": [(7, 'jack')]}).fetchall(),
                [(7, 'jack')]
            )

            eq_(
                conn.execute(stmt, {"uname": [(7, 'jack'), (8, 'fred')]}).fetchall(),
                [(7, 'jack'), (8, 'fred')]
            )
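
The expanding=True flag lets one bound parameter expand into a list of values at execution time (newer SQLAlchemy versions make expanding the default for IN). A single-column sketch in the same 1.x style, reusing users and conn from the test above:

stmt = select([users.c.user_name]).where(
    users.c.user_id.in_(bindparam("ids", expanding=True))
)
rows = conn.execute(stmt, {"ids": [7, 8]}).fetchall()
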
Example no. 20
def get_db_records(pids):
    """Get an iterator on record metadata from the DB.

    Args:
        pids (Iterable[Tuple[str, Union[str, int]]]): a list of (pid_type, pid_value) tuples.

    Yields:
        dict: metadata of a record found in the database.

    Warning:
        The order in which records are returned is different from the order of
        the input.
    """
    pids = [(pid_type, str(pid_value)) for (pid_type, pid_value) in pids]

    if not pids:
        return

    query = RecordMetadata.query.join(
        PersistentIdentifier, RecordMetadata.id == PersistentIdentifier.object_uuid
    ).filter(
        PersistentIdentifier.object_type == 'rec',  # So it can use the 'idx_object' index
        tuple_(PersistentIdentifier.pid_type, PersistentIdentifier.pid_value).in_(pids)
    )

    for record in query.yield_per(100):
        yield record.json
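
A hypothetical call, assuming 'lit' is a valid pid_type in the target instance:

for metadata in get_db_records([("lit", 1234), ("lit", "4321")]):
    print(metadata)  # each item is one record's JSON metadata dict
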
Example no. 21
def get_query_for_ids(modelquery, model, ids):
    """
        Return a query object that contains all entities of the given model for
        the primary keys provided in the ids parameter.

        The ``pks`` parameter is a tuple that contains the different primary key
        values to be returned. If the primary key of the model consists of multiple
        columns, every entry of the ``pks`` parameter must be a tuple containing
        the column values, in the correct order, that make up the primary key of
        the model.

        If the model has multiple primary keys, the
        `tuple_ <http://docs.sqlalchemy.org/en/latest/core/expression_api.html#sqlalchemy.sql.expression.tuple_>`_
        operator will be used. As this operator does not work on certain databases,
        notably on sqlite, a workaround function :func:`tuple_operator_in` is
        provided that implements the same logic using OR and AND operations.

        When the model has multiple primary keys, the pks are provided as a list
        of tuple-look-alike strings, e.g. ``[u'(1, 2)', u'(1, 1)']``. These need
        to be evaluated into real tuples, where
        `Stackoverflow Question 3945856 <http://stackoverflow.com/questions/3945856/converting-string-to-tuple-and-adding-to-tuple>`_
        pointed to `Literal Eval <http://docs.python.org/2/library/ast.html#ast.literal_eval>`_, which is now used.
    """
    if has_multiple_pks(model):
        model_pk = [getattr(model, pk_name).expression for pk_name in get_primary_key(model)]
        ids = [literal_eval(id) for id in ids]
        try:
            query = modelquery.filter(tuple_(*model_pk).in_(ids))
            # Only the execution of the query will tell us, if the tuple_
            # operator really works
            query.all()
        except DBAPIError:
            query = modelquery.filter(tuple_operator_in(model_pk, ids))
    else:
        model_pk = getattr(model, get_primary_key(model))
        query = modelquery.filter(model_pk.in_(ids))
    return query
Example no. 22
    def __eq__(self, other):
        """
        Compare the userid for equality with `other`.

        `other` can be anything plausibly on the RHS of a comparison, which
        can include other SQL clause elements or expressions, as in

            User.userid == sa.tuple_(User.username, Group.authority)

        or literals, as in

            User.userid == 'acct:username@example.com'

        We treat the literal case specially, and split the string into
        username and authority ourselves. If the string is not a well-formed
        userid, the comparison will always return False.
        """
        if isinstance(other, string_types):
            try:
                val = split_user(other)
            except ValueError:
                # The value being compared isn't a valid userid
                return False
            else:
                other = sa.tuple_(_normalise_username(val['username']),
                                  val['domain'])
        return self.__clause_element__() == other
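
With this comparator in place, both documented forms work in a filter; a usage sketch with an assumed session and a hypothetical userid:

user = (
    session.query(User)
    .filter(User.userid == "acct:username@example.com")
    .one_or_none()
)
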
Example no. 23
 def test_tuple(self):
     expr = self.parser(
         sa.tuple_(self.User.name, 3).in_([(u'someone', 3)])
     )
     assert str(expr) == (
         '(category.name, :param_1) IN ((:param_2, :param_3))'
     )
Example no. 24
def get_all_processed_orders(login=None):
    session = Db.instance().session
    # Create subquery for retrieving the last state of each order
    subquery = session.query(
        Order_state.order_id, func.max(Order_state.date)
    ).group_by(Order_state.order_id).subquery()
    last_state = Order_state._STATES[-1]
    if login is not None:
        return Order().queryObject().join(Order_state).filter(and_(
            tuple_(Order_state.order_id, Order_state.date).in_(subquery),
            Order.user.has(login=login),
            Order_state.state == last_state
        )).order_by(Order_state.state, desc(Order_state.date)).all()
    else:
        return Order().queryObject().join(Order_state).filter(
            tuple_(Order_state.order_id, Order_state.date).in_(subquery),
            Order_state.state == last_state
        ).order_by(Order_state.state, desc(Order_state.date)).all()
Example no. 25
 def _fetch(self, last_index, sort_index=None, eager=True):
     cursor, limit = self.cursor, self.per_page
     lhs, rhs = (), ()
     direction = self.sort_column[1] if self.sort_column else sa.asc
     if sort_index is not None:
         lhs += (self.sort_column[0], )
         rhs += (sort_index, )
     if last_index is not None:
         lhs += (self.index_column, )
         rhs += (last_index, )
     lhs = sa.tuple_(*lhs)
     rhs = sa.tuple_(*rhs)
     if rhs.clauses:
         filter = lhs > rhs if direction == sa.asc else lhs < rhs
         cursor = cursor.filter(filter)
     query = cursor.order_by(direction(self.index_column)).limit(limit)
     return query.all() if eager else query
Example no. 26
    def get_blocked_time_intervals(cls, resource, start_date, end_date, week_day, start_time, end_time):
        def get_next_weekday(start_date, week_day):
            days_ahead = week_day - start_date.isoweekday()
            if days_ahead < 0:  # Target day already happened this week
                days_ahead += 7
            return start_date + timedelta(days=days_ahead)

        def get_previous_weekday(end_date, week_day):
            days_behind = week_day - end_date.isoweekday()
            if days_behind > 0:  # Target day already happened this week
                days_behind -= 7
            return end_date + timedelta(days=days_behind)

        # adjust start and end date to match week day
        start_date = get_next_weekday(start_date, week_day)
        end_date = get_previous_weekday(end_date, week_day)

        # if start_date is after end_date, there is no period to check against anymore
        if start_date > end_date:
            return []

        start_datetime = datetime.combine(start_date, start_time)
        end_datetime = datetime.combine(end_date, end_time)
        query = current_app.db_session.query(BlockedTimeInterval)
        objects = query.filter(BlockedTimeInterval.resource == resource)

        objects = objects.filter(
            or_(
                tuple_(BlockedTimeInterval.start_time, BlockedTimeInterval.end_time).op("overlaps")(
                    tuple_(cast(start_datetime, DateTime), cast(end_datetime, DateTime))
                )
            ),
            or_(
                (
                    func.LEAST(cast(end_datetime, Date), cast(BlockedTimeInterval.end_time, Date))
                    - func.GREATEST(cast(start_datetime, Date), cast(BlockedTimeInterval.start_time, Date))
                )
                >= 6,
                text(
                    "EXISTS (SELECT 1 FROM (SELECT EXTRACT(ISODOW FROM generate_series(GREATEST(:start_date, blocked_time_intervals.start_time), LEAST(:end_date, blocked_time_intervals.end_time), CAST('1 day' as INTERVAL))) AS weekday) as weekdays WHERE weekdays.weekday = :week_day)"
                ).params(start_date=start_datetime, end_date=end_datetime, week_day=week_day),
            ),
        )

        return objects.all()
Example no. 27
    def test_bound_in_two_tuple(self):
        table = self.tables.some_table

        stmt = select([table.c.id]).where(
            tuple_(table.c.x, table.c.y).in_(bindparam('q', expanding=True)))

        self._assert_result(
            stmt,
            [(2, ), (3, ), (4, )],
            params={"q": [(2, 3), (3, 4), (4, 5)]},
        )
Example no. 28
def get_all_new_orders():
    session = Db.instance().session
    # First we create a subquery for retrieving the last state of each order
    subquery = session.query(
        Order_state.order_id, func.max(Order_state.date)
    ).group_by(Order_state.order_id).subquery()
    # Filter orders by checking which ones have a last state with state 0
    return Order().queryObject().join(Order_state).filter(and_(
        tuple_(Order_state.order_id, Order_state.date).in_(subquery),
        Order_state.state == Order_state._STATES[0])).order_by(
        desc(Order_state.date)).all()
Example no. 29
    def test_bound_in_heterogeneous_two_tuple(self):
        table = self.tables.some_table

        stmt = select([table.c.id]).where(
            tuple_(table.c.x, table.c.z).in_(
                bindparam('q', expanding=True))).order_by(table.c.id)

        self._assert_result(
            stmt,
            [(2, ), (3, ), (4, )],
            params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]},
        )
Example no. 30
    def test_empty_homogeneous_tuples(self):
        table = self.tables.some_table

        stmt = select([table.c.id]).where(
            tuple_(table.c.x, table.c.y).in_(
                bindparam('q', expanding=True))).order_by(table.c.id)

        self._assert_result(
            stmt,
            [],
            params={"q": []},
        )
Example no. 31
def _get_objects_from_deleted(aggregate_deleted, aggregate_field):
    """Get objects with deleted source.

  This function returns all objects that have one computed value that belongs
  to a currently deleted aggregate object.
  """
    if not aggregate_deleted:
        return set()
    return set(
        db.session.query(
            models.Attributes.object_type,
            models.Attributes.object_id,
        ).filter(
            sa.tuple_(
                models.Attributes.source_type,
                models.Attributes.source_id,
            ).in_(aggregate_deleted),
            models.Attributes.source_attr == aggregate_field,
        ))
Example no. 32
    def delete_old_records(
        cls, task_id: str, dag_id: str,
        num_to_keep=conf.getint("core", "max_num_rendered_ti_fields_per_task", fallback=0),
        session: Session = None
    ):
        """
        Keep only Last X (num_to_keep) number of records for a task by deleting others

        :param task_id: Task ID
        :param dag_id: Dag ID
        :param num_to_keep: Number of Records to keep
        :param session: SqlAlchemy Session
        """
        if num_to_keep <= 0:
            return

        # Fetch Top X records given dag_id & task_id ordered by Execution Date
        subq1 = (
            session
            .query(cls.dag_id, cls.task_id, cls.execution_date)
            .filter(cls.dag_id == dag_id, cls.task_id == task_id)
            .order_by(cls.execution_date.desc())
            .limit(num_to_keep)
            .subquery('subq1')
        )

        # Second Subquery
        # Workaround for MySQL Limitation (https://stackoverflow.com/a/19344141/5691525)
        # Limitation: This version of MySQL does not yet support
        # LIMIT & IN/ALL/ANY/SOME subquery
        subq2 = (
            session
            .query(subq1.c.dag_id, subq1.c.task_id, subq1.c.execution_date)
            .subquery('subq2')
        )

        session.query(cls) \
            .filter(and_(
                cls.dag_id == dag_id,
                cls.task_id == task_id,
                tuple_(cls.dag_id, cls.task_id, cls.execution_date).notin_(subq2))) \
            .delete(synchronize_session=False)
Example no. 33
def get_pairwise_test(corr_complete, query_units, ife_list):
    bps_comb = []
    for a in range(0, len(corr_complete)):
        bps_comb.append([(map(str, comb))
                         for comb in combinations(corr_complete[a], 2)])

    unit1 = []
    unit2 = []
    bpair = []
    bstack = []
    bphosphate = []
    bribose = []

    for a in range(0, len(corr_complete)):
        bps_list = UnitPairInteractions.query.filter(
            tuple_(UnitPairInteractions.unit_id_1, UnitPairInteractions.unit_id_2) \
                .in_(bps_comb[a]))

        for row in bps_list:
            unit1.append(row.unit_id_1)
            unit2.append(row.unit_id_2)
            bpair.append(row.f_lwbp)
            bstack.append(row.f_stacks)
            bphosphate.append(row.f_bphs)
            bribose.append(row.f_brbs)

        pairwise_bpair = zip(unit1, unit2, bpair)
        pairwise_bstack = zip(unit1, unit2, bstack)
        pairwise_bribose = zip(unit1, unit2, bribose)
        pairwise_bphosphate = zip(unit1, unit2, bphosphate)

    bpair_annotation, bpair_numbering = process_annotation(
        pairwise_bpair, ife_list)
    bstack_annotation, bstack_numbering = process_annotation(
        pairwise_bstack, ife_list)
    bribose_annotation, bribose_numbering = process_annotation(
        pairwise_bribose, ife_list)
    bphosphate_annotation, bphosphate_numbering = process_annotation(
        pairwise_bphosphate, ife_list)

    return bpair_annotation, bpair_numbering, bstack_annotation, bstack_numbering, \
           bribose_annotation, bribose_numbering, bphosphate_annotation, bphosphate_numbering
Example no. 34
def get_top(condition,
            session,
            header,
            field_name,
            icon,
            user_id,
            additional_filter=text_('')):
    actual_profiles = session.query(Character.user_id, func.max(Character.date)). \
        group_by(Character.user_id)
    actual_profiles = actual_profiles.all()
    characters = session.query(Character).filter(tuple_(Character.user_id, Character.date)
                                                 .in_([(a[0], a[1]) for a in actual_profiles]),
                                                 Character.date > datetime.now() - timedelta(days=7),
                                                 additional_filter)\
        .order_by(condition)
    if CASTLE:
        characters = characters.filter_by(castle=CASTLE)
    characters = characters.all()
    text = header
    str_format = MSG_TOP_FORMAT
    for i in range(min(10, len(characters))):
        text += str_format.format(i + 1, characters[i].name,
                                  characters[i].level,
                                  getattr(characters[i], field_name), icon)
    if user_id in [character.user_id for character in characters]:
        if user_id not in [character.user_id for character in characters[:10]]:
            for i in range(10, len(characters)):
                if characters[i].user_id == user_id:
                    text += '...\n'
                    text += str_format.format(
                        i, characters[i - 1].name, characters[i - 1].level,
                        getattr(characters[i - 1], field_name), icon)
                    text += str_format.format(
                        i + 1, characters[i].name, characters[i].level,
                        getattr(characters[i], field_name), icon)
                    if i != len(characters) - 1:
                        text += str_format.format(
                            i + 2, characters[i + 1].name,
                            characters[i + 1].level,
                            getattr(characters[i + 1], field_name), icon)
                    break
    return text
Example no. 35
def _mark_env_images_that_can_be_removed(
    project_uuid: Optional[str] = None,
    environment_uuid: Optional[str] = None,
    latest_can_be_removed: bool = False,
) -> None:
    """
    Args:
        project_uuid:
        environment_uuid:
        latest_can_be_removed: If True, "latest" images for an
            environment will be considered removable. This is only
            useful when deleting an environment. Be absolutely sure of
            what you are doing if you set this to True. If this is set
            to True project_uuid and environment_uuid must be passed as
            well.
    """
    logger.info("Marking environment images for removal.")

    # Migrate old env images, will happen only once.
    imgs_to_migrate = models.EnvironmentImage.query.filter(
        models.EnvironmentImage.digest == "Undefined", ).all()
    for img in imgs_to_migrate:
        digest = registry.get_manifest_digest(
            _config.ENVIRONMENT_IMAGE_NAME.format(
                project_uuid=img.project_uuid,
                environment_uuid=img.environment_uuid),
            img.tag,
        )
        if digest is not None:
            img.digest = digest

    imgs = _env_images_that_can_be_deleted(project_uuid, environment_uuid,
                                           latest_can_be_removed)

    # Bulk update "marked_for_removal".
    models.EnvironmentImage.query.filter(
        tuple_(
            models.EnvironmentImage.project_uuid,
            models.EnvironmentImage.environment_uuid,
            models.EnvironmentImage.tag,
        ).in_([(img.project_uuid, img.environment_uuid, img.tag)
               for img in imgs])).update({"marked_for_removal": True})
Example no. 36
def add_all(session, checklist, new_transactions):
    """
    for any pair of users, there should be at most a single transaction entity between them per checklist.
    so instead of simply persisting the new transactions, we check for existing ones with matching users.
    if we find matches, we update the old transactions. otherwise, we add the new ones.
    """
    user_tuples = list(map(lambda transaction: (transaction.giver_id, transaction.receiver_id), new_transactions))
    # we need to find matches in both directions of giver/receiver, so we add the inverse tuples to themselves
    user_tuples += [user_tuple[::-1] for user_tuple in user_tuples]

    old_transactions = session \
        .query(Transaction) \
        .filter(Transaction.checklist == checklist,
                tuple_(Transaction.giver_id, Transaction.receiver_id).in_(user_tuples)) \
        .all()

    for new_transaction in new_transactions:
        _merge_data(session, new_transaction, old_transactions)

    session.commit()
Example no. 37
    def load_many(cls, idents, session, cache=None):
        idents = [unflatten(it) for it in idents]

        if cache is not None:
            names = [cls.format_cache_key(it) for it in idents]
            values = cache.get_many(names)
            pairs = zip(idents, values)
            results = {it: v for (it, v) in pairs if v is not None}  # keep cache hits only
            remainders = [it for it in idents if it not in results]
        else:
            results = dict()
            remainders = idents

        if remainders:
            tbl = cls.get_table()
            cond = tuple_(*tbl.primary_key).in_(remainders)
            query = session.query(cls).filter(cond)
            for o in query.all():
                results[o.get_identity(flat=False)] = o
        return [results.get(it) for it in idents]
Example no. 38
def remove_related_acl(related_to_del):
  """Remove related ACL records on related object delete.

  Args:
      related_to_del: mapping related object type to set of ids to delete
          {
            related_object_type_name1: set(related_id1, ...),
            related_object_type_name2: set(related_id1, ...)
            ...
          }
  """
  if not related_to_del:
    return

  db.session.query(all_models.AccessControlList).filter(
      sa.tuple_(
          all_models.AccessControlList.object_type,
          all_models.AccessControlList.object_id,
      ).in_(related_to_del)
  ).delete(synchronize_session='fetch')
Example no. 39
def custom_attributes_cache(notifications):
    """Compile and return Custom Attributes

  Args:
    notifications: a list of Notification instances for which to fetch the
      corresponding CAds instances
  Returns:
    Dictionary containing all custom attributes with a definition type as a key
  """
    ca_cache = defaultdict(list)
    definitions = models.CustomAttributeDefinition.query.filter(
        sa.tuple_(models.CustomAttributeDefinition.definition_type,
                  models.CustomAttributeDefinition.definition_id).in_([
                      (notification.object_type, notification.object_id)
                      for notification in notifications
                  ]))
    for attr in definitions:
        ca_cache[attr.definition_type].append(attr)

    return ca_cache
Example no. 40
def get_snapshot_data(affected_objects):
  """Get data needed for indexing snapshot values."""
  all_objects = set()
  for objects in affected_objects.itervalues():
    all_objects.update(objects)

  if not all_objects:
    return set(), set()

  query = db.session.query(
      models.Snapshot.child_type,
      models.Snapshot.child_id,
      models.Snapshot.id
  ).filter(
      sa.tuple_(
          models.Snapshot.child_type,
          models.Snapshot.child_id,
      ).in_(all_objects)
  )
  all_ids = []
  snapshot_map = collections.defaultdict(list)
  for computed_type, computed_id, snapshot_id in query:
    snapshot_map[(computed_type, computed_id)].append(snapshot_id)
    all_ids.append(snapshot_id)

  snapshot_tag_map = {}
  if all_ids:
    query = db.session.query(
        models.Snapshot.id,
        sa.func.concat_ws(
            "-",
            models.Snapshot.parent_type,
            models.Snapshot.parent_id,
            models.Snapshot.child_type,
        )
    ).filter(
        models.Snapshot.id.in_(all_ids)
    )
    snapshot_tag_map = dict(query)

  return snapshot_map, snapshot_tag_map
Example no. 41
def generate_squad_members(members, session):
    inline_keys = []
    user_ids = []
    for member in members:
        user_ids.append(member.user_id)
    actual_profiles = session.query(Character.user_id, func.max(Character.date)). \
        filter(Character.user_id.in_(user_ids)). \
        group_by(Character.user_id).all()
    characters = session.query(Character).filter(tuple_(Character.user_id, Character.date)
                                                 .in_([(a[0], a[1]) for a in actual_profiles]))\
        .order_by(Character.level.desc()).all()
    for character in characters:
        time_passed = datetime.now() - character.date
        status_emoji = '❇'
        if time_passed > timedelta(days=7):
            status_emoji = '⁉'
        elif time_passed > timedelta(days=4):
            status_emoji = '‼'
        elif time_passed > timedelta(days=3):
            status_emoji = '❗'
        elif time_passed < timedelta(days=1):
            status_emoji = '🕐'
        inline_keys.append([
            InlineKeyboardButton('{}{}: {}⚔ {}🛡 {}🏅'.format(
                status_emoji, character.name, character.attack,
                character.defence, character.level),
                                 callback_data=json.dumps({
                                     't':
                                     QueryType.ShowHero.value,
                                     'id':
                                     character.user_id,
                                     'b':
                                     True
                                 }))
        ])
    inline_keys.append([
        InlineKeyboardButton(MSG_BACK,
                             callback_data=json.dumps(
                                 {'t': QueryType.SquadList.value}))
    ])
    return InlineKeyboardMarkup(inline_keys)
Example no. 42
 def _stmt_where_region_is_any_of_mutations(
         *mutations: Mutation,
         from_table,
         select_expression,
         only_item_id_in_table: Optional[Table] = None):
     """
     :param mutations:
     :param from_table:
     :param select_expression:
     :param only_item_id_in_table: a table containing a column of item_id. When present, the individuals having any
     of the given mutations are further filtered by considering only the ones in this table.
     :return: the SQL statement querying all the regions from the given source region table, where the regions
     match one of the given mutations. The returned query selects only the properties given in select_expression.
     """
     mutations_having_id = [mut for mut in mutations if mut.id is not None]
     mutations_without_id = [mut for mut in mutations if mut.id is None]
     first_select, second_select = None, None
     if len(mutations_having_id) > 0:
         first_select = select_expression.where(
             from_table.c.id.in_([mut.id for mut in mutations_having_id]))
         if only_item_id_in_table is not None:
             first_select = first_select.where(
                 from_table.c.item_id.in_(
                     select([only_item_id_in_table.c.item_id])))
     if len(mutations_without_id) > 0:
         second_select = select_expression.where(
             tuple_(from_table.c.start, from_table.c.ref, from_table.c.alt,
                    from_table.c.chrom).in_([
                        (mut.start, mut.ref, mut.alt, mut.chrom)
                        for mut in mutations_without_id
                    ]))
         if only_item_id_in_table is not None:
             second_select = second_select.where(
                 from_table.c.item_id.in_(
                     select([only_item_id_in_table.c.item_id])))
     if first_select is not None and second_select is not None:
         return union_all(first_select, second_select)
     elif first_select is not None:
         return first_select
     else:
         return second_select
Example no. 43
        def update_ti_hostname_with_count(count, sensor_works):
            # Using or_ instead of in_ here to prevent a full table scan.
            if session.bind.dialect.name == 'mssql':
                ti_filter = or_(
                    and_(
                        TI.dag_id == ti_key.dag_id,
                        TI.task_id == ti_key.task_id,
                        DR.execution_date == ti_key.execution_date,
                    ) for ti_key in sensor_works)
            else:
                ti_keys = [(x.dag_id, x.task_id, x.execution_date)
                           for x in sensor_works]
                ti_filter = or_(
                    tuple_(TI.dag_id, TI.task_id, DR.execution_date) == ti_key
                    for ti_key in ti_keys)

            for ti in session.query(TI).join(TI.dag_run).filter(ti_filter):
                ti.hostname = self.hostname
            session.commit()

            return count + len(sensor_works)
Example no. 44
def tuple_in_condition(
    columns: Tuple[ColumnElement, ...],
    collection: Iterable[Any],
) -> ColumnOperators:
    """Generates a tuple-in-collection operator to use in ``.filter()``.

    For most SQL backends, this generates a simple ``([col, ...]) IN [condition]``
    clause. This however does not work with MSSQL, where we need to expand to
    ``(c1 = v1a AND c2 = v2a ...) OR (c1 = v1b AND c2 = v2b ...) ...`` manually.

    :meta private:
    """
    if settings.engine.dialect.name != "mssql":
        return tuple_(*columns).in_(collection)
    clauses = [
        and_(*(c == v for c, v in zip(columns, values)))
        for values in collection
    ]
    if not clauses:
        return false()
    return or_(*clauses)
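
A usage sketch with assumed TaskInstance columns and session: the helper keeps the call site identical across backends, including MSSQL where the OR/AND expansion kicks in.

keys = [("example_dag", "task_1"), ("example_dag", "task_2")]
query = session.query(TaskInstance).filter(
    tuple_in_condition((TaskInstance.dag_id, TaskInstance.task_id), keys)
)
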
Example no. 45
def load_stock_code_info(stock_ids=None, stock_codes=None, current=True):

    engine = database.connection('caihui')
    metadata = MetaData(bind=engine)
    t = Table('tq_sk_basicinfo', metadata, autoload=True)

    columns = [t.c.SECODE.label('stock_id'), t.c.SYMBOL.label('stock_code')]

    s = select(columns)
    if stock_ids is not None:
        s = s.where(t.c.SECODE.in_(stock_ids))
    if stock_codes is not None:
        s = s.where(t.c.SYMBOL.in_(stock_codes))
        if current is True:
            columns2 = [t.c.SYMBOL, func.max(t.c.LISTDATE)]
            s2 = select(columns2).group_by(t.c.SYMBOL)
            s = s.where(tuple_(t.c.SYMBOL, t.c.LISTDATE).in_(s2))

    df = pd.read_sql(s, engine, index_col=['stock_id'])

    return df
Example no. 46
def update_games(session, schedule_tbl, schedule_data):
    """Check if any games have been removed or added from the schedule and add that change to the database.

    ToDo: Add check for new games (i.e. when Clippers-Lakers gets rescheduled)
    ToDo: This should work for playoff games too, right?
    ToDo: Iterating through indices potentially slow, though great alternatives don't seem to exist
    """

    data_len = len(schedule_data.data['start_time'])
    tbl_len = session.query(schedule_tbl).count()
    if data_len < tbl_len:
        data_df = pd.DataFrame({
            'home_team_id':
            schedule_data.data['home_team_id'],
            'game_date':
            schedule_data.data['game_date']
        })

        tbl_id_dates = session.query(schedule_tbl.home_team_id,
                                     schedule_tbl.game_date).all()
        id_dates_dict = {
            'home_team_id': [r.home_team_id for r in tbl_id_dates],
            'game_date': [r.game_date for r in tbl_id_dates]
        }
        tbl_df = pd.DataFrame(id_dates_dict)

        # Outer join for all rows, indicator for diff column
        comp = data_df.merge(tbl_df, how='outer', indicator=True)
        tbl_only = comp[comp['_merge'] == 'right_only']
        ids = tbl_only['home_team_id'].values.tolist()
        dates = tbl_only['game_date'].values.tolist()
        cancelled_games = [(ids[i], dates[i]) for i in range(len(ids))]

        delete_rows = session.query(schedule_tbl).filter(
            tuple_(schedule_tbl.home_team_id,
                   schedule_tbl.game_date).in_(cancelled_games))
        if delete_rows.count() > 0:
            for row in delete_rows:
                session.delete(row)
Example no. 47
    def bulk_upsert(self, records: t.Iterable, db_model):
        primary_key = inspect(db_model).primary_key

        def build_key(entity) -> t.Tuple:
            return tuple(
                getattr(entity, primary_key_item.name)
                for primary_key_item in primary_key
            )

        db_objects = []
        db_objects_keys = []
        for record in records:
            db_object = db_model(**record)
            db_objects.append(db_object)
            db_objects_keys.append(build_key(db_object))

        query = self.session.query(db_model).filter(
            tuple_(*build_key(db_model)).in_(db_objects_keys)
        )
        query.delete(synchronize_session="fetch")
        self.session.flush()
        self.bulk_insert(db_objects)
Example no. 48
    def get_select_prop(self, s, tail, params):
        tl = self.db.templatelinks
        page = self.db.page
        target_page = self.db.page.alias()
        nss = self.db.namespace_starname.alias()

        tail = tail.outerjoin(tl, page.c.page_id == tl.c.tl_from)
        tail = tail.outerjoin(
            target_page, (tl.c.tl_namespace == target_page.c.page_namespace) &
            (tl.c.tl_title == target_page.c.page_title))
        tail = tail.outerjoin(nss, tl.c.tl_namespace == nss.c.nss_id)

        s = s.column(tl.c.tl_namespace)
        s = s.column(tl.c.tl_title)
        s = s.column(nss.c.nss_name.label("target_nss_name"))

        # restrictions
        if "namespace" in params:
            namespace = params["namespace"]
            if not isinstance(namespace, set):
                namespace = {namespace}
            s = s.where(tl.c.tl_namespace.in_(namespace))
        if "templates" in params:
            templates = params["templates"]
            if not isinstance(templates, set):
                templates = {templates}
            pairs = set()
            for template in templates:
                template = self.db.Title(template)
                pairs.add((template.namespacenumber, template.pagename))
            s = s.where(sa.tuple_(tl.c.tl_namespace, tl.c.tl_title).in_(pairs))

        # order by
        if params["dir"] == "ascending":
            s = s.order_by(tl.c.tl_namespace.asc(), tl.c.tl_title.asc())
        else:
            s = s.order_by(tl.c.tl_namespace.desc(), tl.c.tl_title.desc())

        return s, tail
Example no. 49
def load_by_primary_keys(session: Session, mapper: Mapper,
                         identities: Iterable[Tuple], *entities) -> Query:
    """ Given a Session, load many instances using a list of their primary keys

    Args:
        session: The Session to use for loading
        mapper: The mapper to filter the primary keys from
        identities: An iterable of identities (primary key tuples)
        entities: Additional entities to load with ssn.query(...)

    Returns:
        A Query.
        First field "pk": the identity tuple (the primary key)
        Other fields: the *entities you wanted loaded
    """
    pk_columns = get_primary_key_columns(mapper)

    # Load many instances by their primary keys
    #
    # First of all, we need to load the primary key, as well as the missing column's value, so it looks like we need
    #       pk_col1, pk_col2, ..., attr_value
    # But then in Python we would have to slice the list.
    # But because Postgres supports tuples, we select a tuple of the primary key instead:
    #       (pk_col1, pk_col2, ...), attr_value
    # Just two columns, one being a composite primary key.
    # It perfectly matches SqlAlchemy's instance identity, which is a tuple of primary keys.
    #
    # Secondly, the primary key condition. We're going to load N instances by their primary keys.
    # We could have done like this:
    #       WHERE (pk_col1=:val AND pk_col2=:val) OR (pk_col1=:val AND pk_col2=:val) OR ...
    # but once again, tuples are very convenient and readable:
    #       WHERE (pk_col1, pk_col2) IN ((:val, :val), (:val, :val), ...)
    #
    # Thanks for this neat trick, @vdmit11 :)
    return session.query(
        # That's the primary key tuple
        tuple_(*pk_columns).label('pk'),
        # Additional entities you want to load
        *entities).filter(build_primary_key_condition(pk_columns, identities))
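
A hypothetical usage sketch: each returned row carries the identity tuple under the "pk" label, followed by the requested entities.

rows = load_by_primary_keys(session, User.__mapper__, [(1,), (2,)], User)
for row in rows:
    print(row.pk, row.User)  # assumed User model with a single-column PK
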
Example no. 50
    def apply_filters(self, query, params):

        if params.get('filter[address]'):

            if len(params.get('filter[address]')) == 64:
                account_id = params.get('filter[address]')
            else:
                try:
                    account_id = ss58_decode(params.get('filter[address]'),
                                             SUBSTRATE_ADDRESS_TYPE)
                except ValueError:
                    return query.filter(False)
        else:
            account_id = None

        if params.get('filter[search_index]'):

            search_index = SearchIndex.query(self.session).filter_by(
                index_type_id=params.get('filter[search_index]'),
                account_id=account_id).order_by(
                    SearchIndex.sorting_value.desc())

            query = query.filter(
                tuple_(Event.block_id,
                       Event.event_idx).in_([[s.block_id, s.event_idx]
                                             for s in search_index]))
        else:

            if params.get('filter[module_id]'):

                query = query.filter_by(
                    module_id=params.get('filter[module_id]'))

            if params.get('filter[event_id]'):

                query = query.filter_by(
                    event_id=params.get('filter[event_id]'))

        return query
Example no. 51
def get_query_for_ids(modelquery, model, ids):
    """
        Return a query object that contains all entities of the given model for
        the primary keys provided in the ids parameter.

        The ``pks`` parameter is a tuple that contains the different primary key
        values to be returned. If the primary key of the model consists of multiple
        columns, every entry of the ``pks`` parameter must be a tuple containing
        the column values, in the correct order, that make up the primary key of
        the model.

        If the model has multiple primary keys, the
        `tuple_ <http://docs.sqlalchemy.org/en/latest/core/expression_api.html#sqlalchemy.sql.expression.tuple_>`_
        operator will be used. As this operator does not work on certain databases,
        notably on sqlite, a workaround function :func:`tuple_operator_in` is
        provided that implements the same logic using OR and AND operations.

        When the model has multiple primary keys, the pks are provided as a list
        of tuple-look-alike strings, e.g. ``[u'(1, 2)', u'(1, 1)']``. These need
        to be evaluated into real tuples, where
        `Stackoverflow Question 3945856 <http://stackoverflow.com/questions/3945856/converting-string-to-tuple-and-adding-to-tuple>`_
        pointed to `Literal Eval <http://docs.python.org/2/library/ast.html#ast.literal_eval>`_, which is now used.
    """
    if has_multiple_pks(model):
        model_pk = [
            getattr(model, pk_name).expression
            for pk_name in get_primary_key(model)
        ]
        ids = [literal_eval(id) for id in ids]
        try:
            query = modelquery.filter(tuple_(*model_pk).in_(ids))
            # Only executing the query will tell us whether the tuple_
            # operator really works on this database
            query.all()
        except DBAPIError:
            query = modelquery.filter(tuple_operator_in(model_pk, ids))
    else:
        model_pk = getattr(model, get_primary_key(model))
        query = modelquery.filter(model_pk.in_(ids))
    return query
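
# The sqlite-safe fallback referenced in the docstring; a minimal sketch of
# the OR/AND expansion it performs (assuming model_pk is a list of columns and
# ids a list of value tuples), not necessarily the exact implementation:
from sqlalchemy import and_, or_, false

def tuple_operator_in(model_pk, ids):
    # Portable replacement for tuple_(*model_pk).in_(ids):
    #     (pk1 = :a AND pk2 = :b) OR (pk1 = :c AND pk2 = :d) OR ...
    if not ids:
        return false()
    return or_(*[
        and_(*[col == value for col, value in zip(model_pk, id_tuple)])
        for id_tuple in ids
    ])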
Example n. 52
def get_trip_pk_to_last_stop_map(trip_pks):
    """
    Get the map to trip PK to the last stop for the trip.

    :param trip_pks: the trip PKs to build the map for
    :return: trip_pk to Stop
    """
    session = dbconnection.get_session()

    sub_query = (session.query(
        models.TripStopTime.trip_pk,
        sqlalchemy.func.max(models.TripStopTime.stop_sequence),
    ).group_by(models.TripStopTime.trip_pk).filter(
        models.TripStopTime.trip_pk.in_(trip_pks)))
    query = (session.query(models.TripStopTime.trip_pk, models.Stop).filter(
        models.TripStopTime.stop_pk == models.Stop.pk).filter(
            sqlalchemy.tuple_(
                models.TripStopTime.trip_pk,
                models.TripStopTime.stop_sequence).in_(sub_query)))
    trip_pk_to_last_stop = {trip_pk: None for trip_pk in trip_pks}
    for trip_pk, last_stop in query:
        trip_pk_to_last_stop[trip_pk] = last_stop
    return trip_pk_to_last_stop
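
# Hypothetical usage sketch (the trip PKs are made up): every requested trip
# appears in the result, mapped to None when it has no stop times.
for trip_pk, stop in get_trip_pk_to_last_stop_map([1, 2, 3]).items():
    print(trip_pk, stop.pk if stop is not None else None)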
Example n. 53
def dataset_comment_followers(context, data_dict):
    """
    Number of dataset comment followers at an organization level
    :param context:
    :param data_dict:
    :return:
    """
    org_id = data_dict.get('org_id', None)
    utc_start_date = data_dict.get('utc_start_date', None)
    utc_end_date = data_dict.get('utc_end_date', None)

    check_org_access(org_id)

    try:
        db.init_db(model)
        return (_session_.query(
            # We want to count a user each time they follow a comment thread, not just unique user IDs
            func.count(
                distinct(
                    tuple_(CommentNotificationRecipient.user_id,
                           CommentNotificationRecipient.thread_id))
            )).filter(
                _and_(CommentThread.url.like(DATASET_LIKE),
                      Comment.state == ACTIVE_STATE,
                      Comment.creation_date >= utc_start_date,
                      Comment.creation_date < utc_end_date,
                      Package.owner_org == org_id,
                      Package.state == ACTIVE_STATE)).join(
                          CommentThread,
                          CommentThread.id == CommentNotificationRecipient.
                          thread_id).join(Comment).join(
                              Package, Package.name == _replace_(
                                  CommentThread.url, DATASET_PREFIX,
                                  ''))).scalar()

    except Exception as e:
        log.error(str(e))
Example n. 54
    def test_joins(self):
        t1, t2 = self.create_tables(2)

        query = session.query(t1.c.x, t2.c.x) \
            .join(t2, tuple_(t1.c.x, t1.c.y), any=True)

        self.assertEqual(
            self.compile(query), "SELECT x AS t0_x, x AS t1_x FROM t0 "
            "ANY INNER JOIN t1 USING x, y")

        query = session.query(t1.c.x, t2.c.x) \
            .join(t2, tuple_(t1.c.x, t1.c.y), all=True)

        self.assertEqual(
            self.compile(query), "SELECT x AS t0_x, x AS t1_x FROM t0 "
            "ALL INNER JOIN t1 USING x, y")

        query = session.query(t1.c.x, t2.c.x) \
            .join(t2, tuple_(t1.c.x, t1.c.y), all=True, global_=True)

        self.assertEqual(
            self.compile(query), "SELECT x AS t0_x, x AS t1_x FROM t0 "
            "GLOBAL ALL INNER JOIN t1 USING x, y")

        query = session.query(t1.c.x, t2.c.x) \
            .outerjoin(t2, tuple_(t1.c.x, t1.c.y), all=True, global_=True)

        self.assertEqual(
            self.compile(query), "SELECT x AS t0_x, x AS t1_x FROM t0 "
            "GLOBAL ALL LEFT OUTER JOIN t1 USING x, y")

        query = session.query(t1.c.x, t2.c.x) \
            .outerjoin(t2, tuple_(t1.c.x, t1.c.y), all=True, full=True)

        self.assertEqual(
            self.compile(query), "SELECT x AS t0_x, x AS t1_x FROM t0 "
            "ALL FULL OUTER JOIN t1 USING x, y")
Example n. 55
def get_db_records(pids):
    """Get an iterator on record metadata from the DB.

    Args:
        pids (Iterable[Tuple[str, Union[str, int]]]): a list of (pid_type, pid_value) tuples.

    Returns:
        list(dict): metadata of the records found in the database.
    """
    pids = [(pid_type, str(pid_value)) for (pid_type, pid_value) in pids]

    if not pids:
        return []

    query = RecordMetadata.query.join(
        PersistentIdentifier,
        RecordMetadata.id == PersistentIdentifier.object_uuid
    ).filter(
        PersistentIdentifier.object_type == 'rec',
        tuple_(
            PersistentIdentifier.pid_type,
            PersistentIdentifier.pid_value).in_(pids)
    )
    return [rec.json for rec in query.all()]
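
# Hypothetical usage sketch (the pid types below are made up): integer and
# string pid values may be mixed, since they are normalised to strings before
# the tuple IN lookup.
records = get_db_records([('lit', 123), ('aut', '456')])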
Example n. 56
    def lookup_from_links(links):
        """Lookup for an object from its links.

        Parameters
        ----------
        links : :obj:`list` of :obj:`tuple` of :obj:`int`
            list of links (platform, external_id) to use for lookup

        Returns
        -------
        :obj:`ExternalObject`
            the ExternalObject found using the links
        :obj:`None`
            if no ExternalObject was found

        """
        # Existing links from DB
        db_links = ObjectLink.query\
            .filter(tuple_(ObjectLink.platform_id,
                           ObjectLink.external_id).in_(links))\
            .all()

        if len(db_links) == 0:
            return None
        else:
            # Check whether they all link to the same object.
            # We may want to merge them afterwards if they don't.
            objects = set(map(attrgetter('external_object'), db_links))

            # The set must contain exactly one object; more than one means
            # the links point to different objects.
            if len(objects) != 1:
                raise AmbiguousLinkError(objects)

            # Fetch the linked object
            return db_links[0].external_object
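
# Hypothetical usage sketch (platform ids and external ids are made up, and
# the method is assumed to be a static method on ExternalObject, per its
# docstring): links are (platform_id, external_id) pairs, and links matching
# different objects raise AmbiguousLinkError.
obj = ExternalObject.lookup_from_links([(1, 'tt0111161'), (2, '603')])
if obj is None:
    pass  # nothing in the database matches any of these links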
Example n. 57
  def create_revisions(resources, event_id, user_id, action):
    """Create revisions for provided objects in bulk.

    Args:
        resources: [(obj_type, obj_id)] list with types and ids of objects.
        event_id: id of the event that led to the revisions' creation.
        user_id: id of the user for which revisions should be created.
        action: action that will be displayed in revisions.
    """
    issue_objs = all_models.IssuetrackerIssue.query.filter(
        sa.tuple_(
            all_models.IssuetrackerIssue.object_type,
            all_models.IssuetrackerIssue.object_id
        ).in_(resources)
    )
    revision_data = [
        {
            "resource_id": obj.id,
            "resource_type": obj.type,
            "event_id": event_id,
            "action": action,
            "content": obj.log_json(),
            "resource_slug": None,
            "source_type": None,
            "source_id": None,
            "destination_type": None,
            "destination_id": None,
            "updated_at": datetime.datetime.utcnow(),
            "modified_by_id": user_id,
            "created_at": datetime.datetime.utcnow(),
            "context_id": obj.context_id,
        }
        for obj in issue_objs
    ]
    inserter = all_models.Revision.__table__.insert()
    db.session.execute(inserter.values(revision_data))
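
# Hypothetical usage sketch (the ids below are made up): snapshot two
# issue-tracker rows in a single bulk INSERT of revisions.
create_revisions(
    resources=[("Assessment", 42), ("Issue", 7)],
    event_id=1,
    user_id=1,
    action="modified",
)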
Example n. 58
    def bulk_update(self, table_name, raw_rows):
        # raw_rows come as a list of dicts, e.g. [{...}, {...}]; the primary
        # key column is auto-detected from the table's metadata below.
        table = self.get_table(table_name)

        rows_validator = rows_validator_factory(table, with_id=True)
        valid_rows = rows_validator(rows=raw_rows).dict()['rows']

        with self.engine.connect() as connection:
            with connection.begin():
                primary_key_name = table.primary_key.columns.values()[0].name
                table_key = getattr(table.c, primary_key_name)

                stmt = table.update(). \
                    where(table_key == bindparam(primary_key_name)). \
                    values({key: bindparam(key) for key in valid_rows[0]})

                connection.execute(stmt, valid_rows)

                keys = [col for col in table.c]
                values = [tuple(row.values()) for row in valid_rows]
                query = table.select().where(tuple_(*keys).in_(values))
                updated_rows = [dict(row) for row in connection.execute(query)]

        return updated_rows
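
# Hypothetical usage sketch ('client' stands for an instance of the
# surrounding class; table and rows are made up): rows are updated by primary
# key, then re-selected with a tuple IN over all columns and returned.
updated = client.bulk_update('users', [
    {'id': 1, 'name': 'Alice'},
    {'id': 2, 'name': 'Bob'},
])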
Example n. 59
users.create()  # create the table


def execute(s):
    print('-' * 20)
    rs = con.execute(s)
    for row in rs:
        print(row['Id'], row['Name'])


with eng.connect() as con:
    for username in ('xiaoming', 'wanglang', 'lilei'):
        user = users.insert().values(Name=username)
        con.execute(user)

    stm = select([users]).limit(1)
    execute(stm)

    k = [(2, )]
    stm = select([users]).where(tuple_(users.c.Id).in_(k))
    execute(stm)

    stm = select([users]).where(and_(users.c.Id > 2, users.c.Id < 4))
    execute(stm)

    stm = select([users]).order_by(asc(users.c.Name))
    execute(stm)

    stm = select([users]).where(users.c.Name.like('%min%'))
    execute(stm)
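
    # The single-column tuple_ above is a degenerate case; the same pattern
    # with a composite tuple, reusing this snippet's users table (the value
    # pairs are made up):
    k2 = [(2, 'wanglang'), (3, 'lilei')]
    stm = select([users]).where(tuple_(users.c.Id, users.c.Name).in_(k2))
    execute(stm)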
    def view_grid(self):
        req = self.req
        ses = req.session
        params   = req.params
        url_dict = req.matchdict 
        if url_dict['act'] == 'grid':
            columns, query = get_columns()
            query = query.filter(PosSppt.tgl_cetak_sppt.between(self.dt_awal, self.dt_akhir))
            rowTable = DataTables(req.GET, query, columns)
            return rowTable.output_result()

        elif url_dict['act'] == 'rekon':
            query = PosPbbDBSession.query(PosSppt.kd_propinsi,
                                          PosSppt.kd_dati2,
                                          PosSppt.kd_kecamatan,
                                          PosSppt.kd_kelurahan,
                                          PosSppt.kd_blok,
                                          PosSppt.no_urut, 
                                          PosSppt.kd_jns_op, 
                                          PosSppt.thn_pajak_sppt).\
                        filter(PosSppt.tgl_cetak_sppt.between(ses['dt_awal'],ses['dt_akhir']))
            rows = query.all()
        
            queryPbb = PbbDBSession.query(Sppt.kd_propinsi,
                                          Sppt.kd_dati2,
                                          Sppt.kd_kecamatan,
                                          Sppt.kd_kelurahan,
                                          Sppt.kd_blok,
                                          Sppt.no_urut, 
                                          Sppt.kd_jns_op, 
                                          Sppt.thn_pajak_sppt).\
                        filter(Sppt.tgl_cetak_sppt.between(ses['dt_awal'],ses['dt_akhir']))
            rowPbbs = queryPbb.all()
            # Rows present in Pos but missing from Pbb. Comparing lengths
            # alone could miss differences, so compute the set difference
            # directly.
            rowNotFound = list(set(rows) - set(rowPbbs))
            
            columns, query = get_columns()
            # Show at most the first 100 missing rows to keep the IN list small
            qry = query.filter(tuple_(PosSppt.kd_propinsi,
                                      PosSppt.kd_dati2,
                                      PosSppt.kd_kecamatan,
                                      PosSppt.kd_kelurahan,
                                      PosSppt.kd_blok,
                                      PosSppt.no_urut, 
                                      PosSppt.kd_jns_op, 
                                      PosSppt.thn_pajak_sppt).in_(rowNotFound[:100]))
                        
            rowTable = DataTables(req.GET, qry, columns)
            return rowTable.output_result()
            
        elif url_dict['act'] == 'update':
            bayar = FixSppt(req.params['id'])
            query = PosPbbDBSession.query(PosSppt).\
                        filter_by(kd_propinsi    = bayar['kd_propinsi'],
                                  kd_dati2       = bayar['kd_dati2'],
                                  kd_kecamatan   = bayar['kd_kecamatan'],
                                  kd_kelurahan   = bayar['kd_kelurahan'],
                                  kd_blok        = bayar['kd_blok'],
                                  no_urut        = bayar['no_urut'], 
                                  kd_jns_op      = bayar['kd_jns_op'], 
                                  thn_pajak_sppt = bayar['thn_pajak_sppt'])
            row = query.first()
            if row:
                rowPbb = Sppt()
                rowPbb.from_dict(row.to_dict())
 
                try:
                    PbbDBSession.add(rowPbb)
                    PbbDBSession.flush()
                except Exception:
                    return dict(success=0, message='Failed %s' % bayar.get_raw())
            return dict(success=1, message='Success')