def limit_groups(query, model, partition_by, order_by, limit=None, offset=0):
    """Limit the number of rows returned for each group.

    This utility allows you to apply a limit/offset to grouped rows of a
    query. Note that the query will only contain the data from `model`;
    i.e. you cannot add additional entities.

    :param query: The original query, including filters, joins, etc.
    :param model: The model class for `query`
    :param partition_by: The column to group by
    :param order_by: The column to order the partitions by
    :param limit: The maximum number of rows for each partition
                  (``None`` means unlimited)
    :param offset: The number of rows to skip in each partition
    """
    # Number the rows within each partition so we can slice per group.
    inner = query.add_columns(
        over(func.row_number(), partition_by=partition_by,
             order_by=order_by).label('rownum')).subquery()
    query = model.query.select_entity_from(inner)
    # Use `is not None` (rather than truthiness) so an explicit limit of 0
    # returns no rows per group instead of being silently ignored.
    if limit is not None:
        return query.filter(offset < inner.c.rownum,
                            inner.c.rownum <= (limit + offset))
    return query.filter(offset < inner.c.rownum)
def get_issues(session, repo, status=None, closed=False):
    '''Retrieve all the issues associated to a project.

    Watch out that the closed argument is incompatible with the status
    argument. The closed argument will return all the issues whose status
    is not 'Open', otherwise it will return the issues having the
    specified status.
    '''
    # Sequential per-project numbering of the global ids.
    numbered = session.query(
        model.GlobalId,
        sqlalchemy.over(
            sqlalchemy.func.row_number(),
            partition_by=model.GlobalId.project_id,
            order_by=model.GlobalId.id,
        ).label('global_id'),
    ).subquery()

    query = session.query(
        model.Issue, numbered.c.global_id
    ).filter(
        numbered.c.issue_id == model.Issue.id
    ).filter(
        numbered.c.project_id == model.Issue.project_id
    ).filter(
        model.Issue.project_id == repo.id
    ).order_by(model.Issue.id)

    # `closed` wins over `status` when both are provided.
    if closed:
        query = query.filter(model.Issue.status != 'Open')
    elif status is not None:
        query = query.filter(model.Issue.status == status)

    return query.all()
def get_pull_request(session, requestid, project_id=None, project_id_from=None):
    '''Retrieve the pull request matching the given per-project id,
    optionally restricted to a source and/or target project.
    '''
    # Sequential per-project numbering of the global ids.
    numbered = session.query(
        model.GlobalId,
        sqlalchemy.over(
            sqlalchemy.func.row_number(),
            partition_by=model.GlobalId.project_id,
            order_by=model.GlobalId.id,
        ).label('global_id'),
    ).subquery()

    query = session.query(
        model.PullRequest
    ).filter(
        numbered.c.project_id == model.PullRequest.project_id
    ).filter(
        numbered.c.request_id == model.PullRequest.id
    ).filter(
        numbered.c.global_id == requestid
    ).order_by(model.PullRequest.id)

    if project_id:
        query = query.filter(model.PullRequest.project_id == project_id)
    if project_id_from:
        query = query.filter(
            model.PullRequest.project_id_from == project_id_from)

    return query.first()
def prepare_filter_q(self, *, session, offset=None, limit=None, filter_q=None,
                     filter_by_q=None, join=None, fetch_args=None,
                     group_by=None, outerjoin=None, order=None):
    """Build a paginated, filtered query over ``self.entity``.

    Selects a window ``count()`` labelled ``total_items`` next to each
    row. When no order is given and the entity has a ``created`` field,
    results default to ascending ``created`` order. Pagination defaults
    to offset 0 and a page size of 50.
    """
    if not order and hasattr(self.entity, "created"):
        order = {"field": "created", "order": 1}

    extra_columns = [] if fetch_args is None else fetch_args
    q = session.query(
        self.entity,
        sa.over(sa.func.count()).label("total_items"),
        *extra_columns,
    )

    if join:
        q = q.join(*join)
    if outerjoin:
        q = q.outerjoin(*outerjoin)
    if filter_q:
        q = q.filter(*filter_q)
    if filter_by_q:
        q = q.filter_by(**filter_by_q)
    if group_by:
        q = q.group_by(*group_by)
    if order:
        q = self._add_order(q, order)

    # Defaults: first page, 50 items per page.
    page_offset = 0 if offset is None else offset
    page_limit = 50 if limit is None else limit
    return q.offset(page_offset).limit(page_limit)
def get_issue(session, issueid):
    '''Retrieve the issue matching the given per-project issue id.'''
    # Sequential per-project numbering of the global ids.
    numbered = session.query(
        model.GlobalId,
        sqlalchemy.over(
            sqlalchemy.func.row_number(),
            partition_by=model.GlobalId.project_id,
            order_by=model.GlobalId.id,
        ).label('global_id'),
    ).subquery()

    query = session.query(
        model.Issue
    ).filter(
        numbered.c.project_id == model.Issue.project_id
    ).filter(
        numbered.c.issue_id == model.Issue.id
    ).filter(
        numbered.c.global_id == issueid
    ).order_by(model.Issue.id)

    return query.first()
def index(self, **kw):
    # Rank each pilot's fixes from the last six hours, newest first.
    ranked = DBSession.query(
        TrackingFix,
        over(func.rank(),
             partition_by=TrackingFix.pilot_id,
             order_by=desc(TrackingFix.time)).label('rank')) \
        .outerjoin(TrackingFix.pilot) \
        .filter(TrackingFix.time >= datetime.utcnow() - timedelta(hours=6)) \
        .filter(TrackingFix.location_wkt != None) \
        .subquery()

    # Keep only the most recent fix per pilot.
    query = DBSession.query(TrackingFix) \
        .filter(TrackingFix.id == ranked.c.id) \
        .filter(ranked.c.rank == 1) \
        .order_by(desc(TrackingFix.time))

    tracks = []
    for fix in query.all():
        # NOTE(review): assumes by_location always returns an airport —
        # a None return here would raise; confirm against Airport API.
        nearest = Airport.by_location(fix.location, None)
        tracks.append([fix, nearest, nearest.distance(fix.location)])

    return dict(tracks=tracks)
def prepare_versions_q(self, id, *, session, order=None, offset=None,
                       limit=None, filter_q=None, filter_by_q=None,
                       fetch_args=None):
    """Build a query over ``self.versions_entity`` for the given object
    id (or list of ids).

    Selects a window ``count()`` labelled ``total_items`` next to each
    row. When no order is given and the entity has a ``version`` field,
    results default to ascending ``version`` order.
    """
    if not order and hasattr(self.versions_entity, "version"):
        order = {"field": "version", "order": 1}

    extra_columns = [] if fetch_args is None else fetch_args
    q = session.query(
        self.versions_entity,
        sa.over(sa.func.count()).label("total_items"),
        *extra_columns,
    )

    # Accept a single id or a list of ids uniformly.
    ids = id if isinstance(id, list) else [id]
    q = q.filter(self._version_object_key_field().in_(ids))

    if filter_q:
        q = q.filter(*filter_q)
    if filter_by_q:
        q = q.filter_by(**filter_by_q)
    if order:
        q = self._add_versions_order(q, order)

    return q.offset(offset).limit(limit)
def get_latest_fixes(self, max_age=timedelta(hours=6), **kw):
    """Build a query yielding each pilot's most recent visible fix,
    newest pilots first, honouring the per-user tracking delay.
    """
    # Number each pilot's fixes, newest first.
    rank_col = over(func.row_number(),
                    partition_by=TrackingFix.pilot_id,
                    order_by=desc(TrackingFix.time))

    # Hide fixes newer than the pilot's configured delay (minutes).
    delay = cast(cast(User.tracking_delay, String) + ' minutes', Interval)

    latest = DBSession.query(TrackingFix.id, rank_col.label('row_number')) \
        .outerjoin(TrackingFix.pilot) \
        .filter(TrackingFix.time >= datetime.utcnow() - max_age) \
        .filter(TrackingFix.time <= datetime.utcnow() - delay) \
        .filter(TrackingFix.location_wkt != None) \
        .subquery()

    return DBSession.query(TrackingFix) \
        .options(joinedload(TrackingFix.pilot)) \
        .filter(TrackingFix.id == latest.c.id) \
        .filter(latest.c.row_number == 1) \
        .order_by(desc(TrackingFix.time))
def limit_groups(query, model, partition_by, order_by, limit=None, offset=0):
    """Limits the number of rows returned for each group

    This utility allows you to apply a limit/offset to grouped rows of a
    query. Note that the query will only contain the data from `model`;
    i.e. you cannot add additional entities.

    :param query: The original query, including filters, joins, etc.
    :param model: The model class for `query`
    :param partition_by: The column to group by
    :param order_by: The column to order the partitions by
    :param limit: The maximum number of rows for each partition
    :param offset: The number of rows to skip in each partition
    """
    # Attach a per-partition row number so each group can be sliced.
    rownum = over(func.row_number(), partition_by=partition_by,
                  order_by=order_by).label('rownum')
    inner = query.add_columns(rownum).subquery()

    result = model.query.select_entity_from(inner)
    result = result.filter(offset < inner.c.rownum)
    if limit:
        result = result.filter(inner.c.rownum <= limit + offset)
    return result
def to_expression(self, t):
    """Convert a parsed AST node (or a plain Python scalar) into a
    SQLAlchemy column expression.

    :param t: an ``ast`` node, or a bare ``str``/``int``/``float``/``None``
              which is first wrapped in ``ast.Constant``
    :returns: a SQLAlchemy expression (or, for ``ast.Tuple``, a list of
              expressions); labelled with the node's alias when present
    :raises NotImplementedError: for unsupported node types
    :raises Exception: when an ``ast.Parameter`` carries an alias
    """
    # Promote bare Python scalars to Constant nodes so the dispatch
    # below only has to deal with AST types.
    if (isinstance(t, ast.Star)
            or isinstance(t, int)
            or isinstance(t, float)
            or t is None):
        pass
    # simple type
    if (isinstance(t, str)
            or isinstance(t, int)
            or isinstance(t, float)
            or t is None):
        t = ast.Constant(t)

    if isinstance(t, ast.Star):
        # SELECT * — plain string is accepted by the query builder.
        col = '*'
    elif isinstance(t, ast.Constant):
        col = sa.literal(t.value)
        # Label falls back to the rendered value ('NULL' for None).
        if t.alias:
            alias = self.get_alias(t.alias)
        else:
            if t.value is None:
                alias = 'NULL'
            else:
                alias = str(t.value)
        col = col.label(alias)
    elif isinstance(t, ast.Identifier):
        # Column reference, possibly dotted (table.column).
        col = self.to_column(t.parts)
        if t.alias:
            col = col.label(self.get_alias(t.alias))
    elif isinstance(t, ast.Select):
        # Subquery used as a scalar value.
        sub_stmt = self.prepare_select(t)
        col = sub_stmt.scalar_subquery()
        if t.alias:
            alias = self.get_alias(t.alias)
            col = col.label(alias)
    elif isinstance(t, ast.Function):
        # Generic function call via sa.func; args converted recursively.
        op = getattr(sa.func, t.op)
        args = [self.to_expression(i) for i in t.args]
        if t.distinct:
            # set first argument to distinct
            args[0] = args[0].distinct()
        col = op(*args)
        if t.alias:
            alias = self.get_alias(t.alias)
        else:
            alias = str(t.op)
        col = col.label(alias)
    elif isinstance(t, ast.BinaryOperation):
        # Operators mapped to SQLAlchemy ColumnOperators method names.
        # NOTE(review): "__div__" is the legacy division hook; newer
        # SQLAlchemy exposes division via __truediv__ — verify against
        # the pinned SQLAlchemy version.
        methods = {
            "+": "__add__",
            "-": "__sub__",
            "/": "__div__",
            "*": "__mul__",
            "%": "__mod__",
            "=": "__eq__",
            "!=": "__ne__",
            ">": "__gt__",
            "<": "__lt__",
            ">=": "__ge__",
            "<=": "__le__",
            "is": "is_",
            "is not": "is_not",
            "like": "like",
            "in": "in_",
            "not in": "notin_",
            "||": "concat",
        }
        # Logical conjunctions use free functions instead of methods.
        functions = {
            "and": sa.and_,
            "or": sa.or_,
        }
        arg0 = self.to_expression(t.args[0])
        arg1 = self.to_expression(t.args[1])
        method = methods.get(t.op.lower())
        if method is not None:
            sa_op = getattr(arg0, method)
            col = sa_op(arg1)
        else:
            # Unknown operators raise KeyError here by design.
            func = functions[t.op.lower()]
            col = func(arg0, arg1)
        if t.alias:
            alias = self.get_alias(t.alias)
            col = col.label(alias)
    elif isinstance(t, ast.UnaryOperation):
        # NOT or unary minus
        opmap = {
            "NOT": "__invert__",
            "-": "__neg__",
        }
        arg = self.to_expression(t.args[0])
        method = opmap[t.op.upper()]
        col = getattr(arg, method)()
        if t.alias:
            alias = self.get_alias(t.alias)
            col = col.label(alias)
    elif isinstance(t, ast.BetweenOperation):
        col0 = self.to_expression(t.args[0])
        lim_down = self.to_expression(t.args[1])
        lim_up = self.to_expression(t.args[2])
        col = sa.between(col0, lim_down, lim_up)
    elif isinstance(t, ast.WindowFunction):
        # OVER (PARTITION BY ... ORDER BY ...) around a function call.
        func = self.to_expression(t.function)
        partition = None
        if t.partition is not None:
            partition = [self.to_expression(i) for i in t.partition]
        order_by = None
        if t.order_by is not None:
            order_by = []
            for f in t.order_by:
                col0 = self.to_expression(f.field)
                if f.direction == 'DESC':
                    col0 = col0.desc()
                order_by.append(col0)
        col = sa.over(func, partition_by=partition, order_by=order_by)
        if t.alias:
            col = col.label(self.get_alias(t.alias))
    elif isinstance(t, ast.TypeCast):
        arg = self.to_expression(t.arg)
        type = self.get_type(t.type_name)
        col = sa.cast(arg, type)
        if t.alias:
            alias = self.get_alias(t.alias)
            col = col.label(alias)
    elif isinstance(t, ast.Parameter):
        # Rendered literally (unquoted); aliases are not supported here.
        col = sa.column(t.value, is_literal=True)
        if t.alias:
            raise Exception()
    elif isinstance(t, ast.Tuple):
        # Returns a list of expressions, not a single column.
        col = [self.to_expression(i) for i in t.items]
    elif isinstance(t, ast.Variable):
        col = sa.column(t.to_string(), is_literal=True)
    elif isinstance(t, ast.Latest):
        col = sa.column(t.to_string(), is_literal=True)
    else:
        # some other complex object?
        raise NotImplementedError(f'Column {t}')
    return col