def _apply_filters(self, query, params):
    if params.get('start'):
        query = query.filter(CallLog.date >= params['start'])
    if params.get('end'):
        query = query.filter(CallLog.date < params['end'])

    if params.get('call_direction'):
        query = query.filter(CallLog.direction == params['call_direction'])

    if params.get('cdr_ids'):
        query = query.filter(CallLog.id.in_(params['cdr_ids']))

    if params.get('id'):
        query = query.filter(CallLog.id == params['id'])

    if params.get('search'):
        filters = (
            sql.cast(column, sa.String).ilike('%%%s%%' % params['search'])
            for column in self.searched_columns
        )
        query = query.filter(sql.or_(*filters))

    if params.get('number'):
        sql_regex = params['number'].replace('_', '%')
        filters = (
            sql.cast(column, sa.String).like('%s' % sql_regex)
            for column in [
                CallLog.source_exten,
                CallLog.destination_exten,
            ]
        )
        query = query.filter(sql.or_(*filters))

    for tag in params.get('tags', []):
        query = query.filter(
            CallLog.participants.any(
                CallLogParticipant.tags.contains(
                    sql.cast([tag], ARRAY(sa.String))
                )
            )
        )

    if params.get('tenant_uuids'):
        query = query.filter(CallLog.tenant_uuid.in_(params['tenant_uuids']))

    if params.get('me_user_uuid'):
        me_user_uuid = params['me_user_uuid']
        query = query.filter(
            CallLog.participant_user_uuids.contains(str(me_user_uuid))
        )

    if params.get('user_uuids'):
        filters = (
            CallLog.participant_user_uuids.contains(str(user_uuid))
            for user_uuid in params['user_uuids']
        )
        query = query.filter(sql.or_(*filters))

    if params.get('start_id'):
        query = query.filter(CallLog.id >= params['start_id'])

    if params.get('recorded') is not None:
        if params['recorded']:
            query = query.filter(CallLog.recordings.any())
        else:
            query = query.filter(~CallLog.recordings.any())

    return query

def overlaps(lhs, rhs, min_overlap=None, rhs_start=None, rhs_end=None):
    """Returns term of ``lhs`` overlapping with ``rhs`` based on the start/end fields."""
    if rhs_start is None:
        rhs_start = rhs.sa.start
    if rhs_end is None:
        rhs_end = rhs.sa.end
    if min_overlap is None:
        return and_(
            lhs.sa.release == rhs.sa.release,
            lhs.sa.chromosome == rhs.sa.chromosome,
            lhs.sa.bin.in_(
                select([column("bin")]).select_from(func.overlapping_bins(rhs_start - 1, rhs_end))
            ),
            lhs.sa.end >= rhs_start,
            lhs.sa.start <= rhs_end,
        )
    else:
        term_overlap = func.least(lhs.sa.end, rhs_end) - func.greatest(lhs.sa.start, rhs_start) + 1
        return and_(
            lhs.sa.release == rhs.sa.release,
            lhs.sa.chromosome == rhs.sa.chromosome,
            rhs.sa.bin.in_(
                select([column("bin")]).select_from(
                    func.overlapping_bins(lhs.sa.start - 1, lhs.sa.end)
                )
            ),
            lhs.sa.end >= rhs_start,
            lhs.sa.start <= rhs_end,
            cast(term_overlap, Float) / func.greatest((rhs_end - rhs_start + 1), 1) > min_overlap,
            cast(term_overlap, Float) / func.greatest((lhs.sa.end - lhs.sa.start + 1), 1) > min_overlap,
        )

def __table_args__(cls):
    return (db.Index('ix_reservations_start_dt_date', cast(cls.start_dt, Date)),
            db.Index('ix_reservations_end_dt_date', cast(cls.end_dt, Date)),
            db.Index('ix_reservations_start_dt_time', cast(cls.start_dt, Time)),
            db.Index('ix_reservations_end_dt_time', cast(cls.end_dt, Time)),
            db.CheckConstraint("rejection_reason != ''", 'rejection_reason_not_empty'),
            {'schema': 'roombooking'})

def aspect_range(stmt, parent_uri, nodes):
    """
    Find all image nodes having an aspect ratio between two values

    Query parameters:
        value_min - Aspect ratio (float)
        value_max - Aspect ratio (float)
    """
    value_min = float(nodes.pop(0))
    value_max = float(nodes.pop(0))
    subq = select([node_meta_table.c.md5])
    subq = subq.where(
        cast(node_meta_table.c.metadata.op('->')('aspect_ratio'),
             Numeric(7, 3)) >= value_min)
    subq = subq.where(
        cast(node_meta_table.c.metadata.op('->')('aspect_ratio'),
             Numeric(7, 3)) <= value_max)
    stmt = stmt.filter(Node.md5.in_(subq))
    return stmt

def fetch_legs(db, where):
    legs = db.metadata.tables["leg_modes"]
    legends0 = db.metadata.tables["leg_ends"]
    legends1 = legends0.alias("legends1")
    places0 = db.metadata.tables["places"]
    places1 = places0.alias("places1")
    s = select(
        [legs.c.id,
         legs.c.time_start,
         legs.c.time_end,
         legs.c.activity,
         legs.c.line_type,
         legs.c.line_name,
         legs.c.km,
         legs.c.trip,
         func.coalesce(places0.c.label,
                       cast(places0.c.id, String)).label("startplace"),
         func.coalesce(places1.c.label,
                       cast(places1.c.id, String)).label("endplace")],
        where,
        legs.outerjoin(legends0, legs.c.cluster_start == legends0.c.id)
            .outerjoin(legends1, legs.c.cluster_end == legends1.c.id)
            .outerjoin(places0, legends0.c.place == places0.c.id)
            .outerjoin(places1, legends1.c.place == places1.c.id),
        order_by=legs.c.time_start)
    return db.engine.execute(s)

def list(self, line_id):
    user_line_query = (Session.query(UserLine.line_id,
                                     UserLine.extension_id)
                       .filter(UserLine.line_id == line_id)
                       .filter(UserLine.extension_id != None)  # noqa
                       .distinct())

    incall_query = (Session.query(UserLine.line_id,
                                  Extension.id.label('extension_id'))
                    .join(Dialaction,
                          and_(Dialaction.action == 'user',
                               cast(Dialaction.actionarg1, Integer) == UserLine.user_id,
                               UserLine.main_line == True))  # noqa
                    .join(Incall,
                          and_(Dialaction.category == 'incall',
                               cast(Dialaction.categoryval, Integer) == Incall.id))
                    .join(Extension,
                          and_(Incall.exten == Extension.exten,
                               Incall.context == Extension.context))
                    .filter(UserLine.line_id == line_id))

    return [LineExtension(row.line_id, row.extension_id)
            for row in user_line_query.union(incall_query)]

def query(self):
    pq = qualstat_getstatdata(column("eval_type") == "f")
    base = alias(pq)
    query = (select([
        func.array_agg(column("queryid")).label("queryids"),
        "qualid",
        cast(column("quals"), JSONB).label('quals'),
        "occurences",
        "execution_count",
        func.array_agg(column("query")).label("queries"),
        "avg_filter",
        "filter_ratio"
    ]).select_from(
        join(base, powa_databases,
             onclause=(powa_databases.c.oid == literal_column("dbid"))))
        .where(powa_databases.c.datname == bindparam("database"))
        .where(column("avg_filter") > 1000)
        .where(column("filter_ratio") > 0.3)
        .group_by(column("qualid"), column("execution_count"),
                  column("occurences"), cast(column("quals"), JSONB),
                  column("avg_filter"), column("filter_ratio"))
        .order_by(column("occurences").desc())
        .limit(200))
    return query

def __table_args__(cls):
    return (
        db.Index("ix_reservations_start_dt_date", cast(cls.start_dt, Date)),
        db.Index("ix_reservations_end_dt_date", cast(cls.end_dt, Date)),
        db.Index("ix_reservations_start_dt_time", cast(cls.start_dt, Time)),
        db.Index("ix_reservations_end_dt_time", cast(cls.end_dt, Time)),
        {"schema": "roombooking"},
    )

def _build_notification_before_days_filter(notification_before_days):
    days_until_occurrence = cast(ReservationOccurrence.start_dt, Date) - cast(func.now(), Date)
    notification_before_days = func.coalesce(Room.notification_before_days,
                                             notification_before_days)
    if datetime.now().hour >= settings.get('notification_hour', 6):
        # Notify of today and delayed occurrences (happening in N or less days)
        return days_until_occurrence <= notification_before_days
    else:
        # Notify only of delayed occurrences (happening in less than N days)
        return days_until_occurrence < notification_before_days

def create_coordinate(lat, lng):
    """
    Creates a WKT point from a (lat, lng) tuple
    in EPSG4326 coordinate system (normal GPS-coordinates)
    """
    wkb_point = from_shape(Point(lng, lat), srid=4326)
    # Casting to Geography and back here to ensure coordinate wrapping
    return cast(
        cast(wkb_point, Geography(geometry_type="POINT", srid=4326)),
        Geometry(geometry_type="POINT", srid=4326)
    )

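A minimal usage sketch for the helper above; the Place model, its Geometry POINT column, and the open session are hypothetical names, not from the source:

# Hypothetical usage: the Geography round-trip wraps out-of-range
# longitudes back into [-180, 180] before the point is persisted.
stockholm = create_coordinate(59.3293, 18.0686)
session.add(Place(name="Stockholm", location=stockholm))
session.commit()
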
def find_incall_id(self, extension_id):
    return (self.session.query(Incall.id)
            .join(Extension, and_(Extension.type == 'incall',
                                  cast(Extension.typeval, Integer) == Incall.id))
            .join(Dialaction, and_(Dialaction.category == 'incall',
                                   cast(Dialaction.categoryval, Integer) == Incall.id))
            .filter(Extension.id == extension_id)
            .scalar())

def make_filter(cls, field, ftype, value):
    filter = None
    if ftype == 'IN':
        filter = field.in_([v for v in value if v])
    elif ftype == 'date_gt':
        filter = field > value
    elif ftype == 'date_gte':
        filter = field >= value
    elif ftype == 'date_gt_now_less':
        qty, granularity = value.split(" ")
        filter = field > func.date_trunc(granularity,
                                         func.now() - cast(value, Interval()))
    elif ftype == 'date_lt_now_less':
        qty, granularity = value.split(" ")
        filter = field < func.date_trunc(granularity,
                                         func.now() - cast(value, Interval()))
    elif ftype == 'date_x_last_n':
        qty, granularity, count_current_period = value.split(" ")
        filter = (field > func.date_trunc(granularity,
                                          func.now() - cast("%s %s" % (qty, granularity), Interval())),
                  field < func.date_trunc(granularity,
                                          func.now() - cast('0', Interval())),)
        if count_current_period == 'on':
            filter = filter[0]
    elif ftype == 'date_month_ne':
        filter = extract('month', field) != value
    elif ftype == 'date_month_gt':
        filter = extract('month', field) > value
    elif ftype == 'date_month_lt':
        filter = extract('month', field) < value
    elif ftype == 'date_month_eq':
        filter = extract('month', field) == value
    elif ftype == 'date_hour_ne':
        filter = extract('hour', field) != value
    elif ftype == 'date_hour_gt':
        filter = extract('hour', field) > value
    elif ftype == 'date_hour_lt':
        filter = extract('hour', field) < value
    elif ftype == 'date_hour_eq':
        filter = extract('hour', field) == value
    elif ftype == 'date_lt':
        filter = field < value
    elif ftype == 'date_lte':
        filter = field <= value
    elif ftype == '=':
        filter = field == value
    elif ftype == '!=':
        filter = field != value
    elif ftype == '>':
        filter = field > value
    elif ftype == '>=':
        filter = field >= value
    elif ftype == '<':
        filter = field < value
    elif ftype == '<=':
        filter = field <= value
    elif ftype == 'like':
        filter = field.ilike(value)
    return filter

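A short sketch of applying the returned condition, assuming make_filter is exposed as a classmethod on some report-builder class; ReportBuilder, Event, and session are hypothetical names:

# Hypothetical usage: rows whose created_at falls after the start of the
# month three months back ('3 month' is a PostgreSQL interval literal
# that cast(..., Interval()) hands to the database as-is).
cond = ReportBuilder.make_filter(Event.created_at, 'date_gt_now_less', '3 month')
recent = session.query(Event).filter(cond).all()
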
def find_all_by_member_id(session, member_id):
    query = (session.query(Callfiltermember.id,
                           sql.cast(Callfiltermember.typeval, Integer).label('member_id'),
                           Callfiltermember.bstype.label('role'))
             .filter(Callfiltermember.type == 'user')
             .filter(Callfiltermember.bstype.in_(['boss', 'secretary']))
             .filter(sql.cast(Callfiltermember.typeval, Integer) == member_id))

    return [FilterMember(id=row.id,
                         member_id=row.member_id,
                         role=row.role)
            for row in query]

def is_in_notification_window(self, exclude_first_day=False):
    from indico.modules.rb import settings as rb_settings
    from indico.modules.rb.models.rooms import Room
    in_the_past = cast(self.start_dt, Date) < cast(func.now(), Date)
    days_until_occurrence = cast(self.start_dt, Date) - cast(func.now(), Date)
    notification_window = func.coalesce(Room.notification_before_days,
                                        rb_settings.get('notification_before_days', 1))
    if exclude_first_day:
        return (days_until_occurrence < notification_window) & ~in_the_past
    else:
        return (days_until_occurrence <= notification_window) & ~in_the_past

def __init__(self):
    """Find the date range and instantiate the data dictionary."""
    connect_to_db(app)
    self.min_year = db.session.query(
        cast(func.min(extract('year', MedicalCall.received_dttm)), Integer)
    ).scalar()
    self.max_year = db.session.query(
        cast(func.max(extract('year', MedicalCall.received_dttm)), Integer)
    ).scalar()
    self.data = {}

def __getitem__(self, key):
    bind = self.column.table.bind
    values = list(
        bind.execute(
            select([
                self.function(cast(key, types.Date)).label("id"),
                self.label_expression(cast(key, types.Date)).label("label"),
            ])
        )
    )[0]
    return Member(self, values.id, values.label)

def _export_reservations(hook, limit_per_room, include_rooms, extra_filters=None):
    """Exports reservations.

    :param hook: The HTTPAPIHook instance
    :param limit_per_room: Should the limit/offset be applied per room
    :param include_rooms: Should reservations include room information
    """
    filters = list(extra_filters) if extra_filters else []
    if hook._fromDT and hook._toDT:
        filters.append(cast(Reservation.start_dt, Date) <= hook._toDT.date())
        filters.append(cast(Reservation.end_dt, Date) >= hook._fromDT.date())
        filters.append(cast(Reservation.start_dt, Time) <= hook._toDT.time())
        filters.append(cast(Reservation.end_dt, Time) >= hook._fromDT.time())
    elif hook._toDT:
        filters.append(cast(Reservation.end_dt, Date) <= hook._toDT.date())
        filters.append(cast(Reservation.end_dt, Time) <= hook._toDT.time())
    elif hook._fromDT:
        filters.append(cast(Reservation.start_dt, Date) >= hook._fromDT.date())
        filters.append(cast(Reservation.start_dt, Time) >= hook._fromDT.time())
    filters += _get_reservation_state_filter(hook._queryParams)
    data = ['vc_equipment']
    if hook._occurrences:
        data.append('occurrences')
    order = {
        'start': Reservation.start_dt,
        'end': Reservation.end_dt
    }.get(hook._orderBy, Reservation.start_dt)
    if hook._descending:
        order = order.desc()
    reservations_data = Reservation.get_with_data(*data, filters=filters, limit=hook._limit,
                                                  offset=hook._offset, order=order,
                                                  limit_per_room=limit_per_room)
    for result in reservations_data:
        yield result['reservation'].room_id, _serializable_reservation(result, include_rooms)

def pg(element: function, compiler: SQLCompiler, **kw: Dict[str, Any]) -> str:
    args = iter(element.clauses)  # type: ignore
    json_field = next(args)  # type: ignore
    type_bind_param = next(args)  # type: ignore
    type_: TypeEngine = type_bind_param.value  # type: ignore
    assert isinstance(type_, TypeEngine) or issubclass(type_, TypeEngine)
    select_from = sqla_func.jsonb_array_elements(cast(json_field, JSONB)).table_valued("value")
    statement = select([sqla_func.array_agg(cast(select_from.column, type_))])
    return compiler.process(statement, **kw)

def _export_reservations(hook, limit_per_room, include_rooms, extra_filters=None):
    """Exports reservations.

    :param hook: The HTTPAPIHook instance
    :param limit_per_room: Should the limit/offset be applied per room
    :param include_rooms: Should reservations include room information
    """
    filters = list(extra_filters) if extra_filters else []
    if hook._fromDT and hook._toDT:
        filters.append(cast(Reservation.start_dt, Date) <= hook._toDT.date())
        filters.append(cast(Reservation.end_dt, Date) >= hook._fromDT.date())
        filters.append(cast(Reservation.start_dt, Time) <= hook._toDT.time())
        filters.append(cast(Reservation.end_dt, Time) >= hook._fromDT.time())
    elif hook._toDT:
        filters.append(cast(Reservation.end_dt, Date) <= hook._toDT.date())
        filters.append(cast(Reservation.end_dt, Time) <= hook._toDT.time())
    elif hook._fromDT:
        filters.append(cast(Reservation.start_dt, Date) >= hook._fromDT.date())
        filters.append(cast(Reservation.start_dt, Time) >= hook._fromDT.time())
    filters += _get_reservation_state_filter(hook._queryParams)
    occurs = [datetime.strptime(x, '%Y-%m-%d').date()
              for x in filter(None, get_query_parameter(hook._queryParams, ['occurs'], '').split(','))]
    data = []
    if hook._occurrences:
        data.append('occurrences')
    order = {
        'start': Reservation.start_dt,
        'end': Reservation.end_dt
    }.get(hook._orderBy, Reservation.start_dt)
    if hook._descending:
        order = order.desc()
    reservations_data = Reservation.get_with_data(*data, filters=filters, limit=hook._limit,
                                                  offset=hook._offset, order=order,
                                                  limit_per_room=limit_per_room, occurs_on=occurs)
    for result in reservations_data:
        yield result['reservation'].room_id, _serializable_reservation(result, include_rooms)

def get_func_keys():
    columns = (
        phonefunckey_table.c.iduserfeatures.label('user_id'),
        phonefunckey_table.c.fknum.label('position'),
        phonefunckey_table.c.label,
        phonefunckey_table.c.typevalextenumbers.label('action'),
        sql.cast(phonefunckey_table.c.typevalextenumbersright, sa.Integer).label('agent_id'),
        sql.cast(phonefunckey_table.c.supervision, sa.Boolean).label('blf'),
    )

    query = (sql.select(columns)
             .where(phonefunckey_table.c.typevalextenumbers.in_(AGENT_TYPES)))

    return op.get_bind().execute(query)

def _sync(self, df, connection, insert=True, update=True, delete=True):
    niamoto_df = self.get_niamoto_occurrence_dataframe(connection)
    provider_df = df.where((pd.notnull(df)), None)
    insert_df = self.get_insert_dataframe(niamoto_df, provider_df) \
        if insert else []
    update_df = self.get_update_dataframe(niamoto_df, provider_df) \
        if update else []
    delete_df = self.get_delete_dataframe(niamoto_df, provider_df) \
        if delete else []
    with connection.begin():
        if len(insert_df) > 0:
            LOGGER.debug("Inserting new occurrence records...")
            ins_stmt = occurrence.insert().values(
                provider_id=bindparam('provider_id'),
                provider_pk=bindparam('provider_pk'),
                location=bindparam('location'),
                taxon_id=bindparam('taxon_id'),
                provider_taxon_id=bindparam('provider_taxon_id'),
                properties=cast(bindparam('properties'), JSONB),
            )
            ins_data = insert_df.to_dict(orient='records')
            connection.execute(ins_stmt, ins_data)
        if len(update_df) > 0:
            LOGGER.debug("Updating existing occurrence records...")
            upd_stmt = occurrence.update().where(
                and_(occurrence.c.provider_id == bindparam('prov_id'),
                     occurrence.c.provider_pk == bindparam('prov_pk'))
            ).values({
                'location': bindparam('location'),
                'taxon_id': bindparam('taxon_id'),
                'properties': cast(bindparam('properties'), JSONB),
                'provider_taxon_id': bindparam('provider_taxon_id'),
            })
            upd_data = update_df.rename(columns={
                'provider_id': 'prov_id',
                'provider_pk': 'prov_pk',
            }).to_dict(orient='records')
            connection.execute(upd_stmt, upd_data)
        if len(delete_df) > 0:
            LOGGER.debug("Deleting expired occurrence records...")
            del_stmt = occurrence.delete().where(
                occurrence.c.id.in_(delete_df.index))
            connection.execute(del_stmt)
    return insert_df, update_df, delete_df

def search_added(self, year, month=None, day=None, types='*', limit=None):
    ''' Search by added date '''
    date_trunc = 'day' if day else 'month' if month else 'year'
    month, day = month or 1, day or 1
    search_date = date(year, month, day)
    search_for = orm.with_polymorphic(Content, types)
    search_query = self.dbsession.query(search_for)
    filters = sql.and_(
        search_for.filter_published(),
        sql.func.date_trunc(date_trunc, sql.cast(search_for.added, Date))
        == sql.func.date_trunc(date_trunc, search_date)
    )
    if types != '*':
        ids = polymorphic_ids(search_for, types)
        filters.append(search_for.content_type_id.in_(ids))
    search_query = search_query.filter(filters)
    count = search_query.count()
    search_query = search_query.order_by(search_for.added.desc())
    if limit:
        search_query = search_query.limit(limit)
    return search_result(search_query, count)

def nearest_landmark(self, db, with_distance=False):
    """
    Returns the nearest Landmark.

    If this system has unknown coordinates, or no landmark systems exist, returns None.

    :param db: Database session
    :param with_distance: If True, returns a tuple of (landmark, distance) instead of just
        the landmark.
    """
    if not self.has_coordinates:
        return None
    distance = sql.func.starsystem_distance(Landmark.xz, Landmark.y,
                                            sql.cast(self.xz, SQLPoint), self.y)
    query = (
        db.query(Landmark, distance.label("distance"))
        .filter(Landmark.has_coordinates)
        .order_by(distance)
    )
    result = query.first()
    if not result:
        if with_distance:
            return None, None
        return None
    if with_distance:
        return result.Landmark, result.distance
    return result.Landmark

def qualstat_getstatdata(srvid, condition=None):
    base_query = qualstat_base_statdata()
    if condition:
        base_query = base_query.where(condition)
    return (select([
        powa_statements.c.srvid,
        column("qualid"),
        powa_statements.c.queryid,
        column("query"),
        powa_statements.c.dbid,
        func.to_json(column("quals")).label("quals"),
        sum(column("execution_count")).label("execution_count"),
        sum(column("occurences")).label("occurences"),
        (sum(column("nbfiltered")) / sum(column("occurences"))).label("avg_filter"),
        case([(sum(column("execution_count")) == 0, 0)],
             else_=sum(column("nbfiltered")) /
             cast(sum(column("execution_count")), Numeric) * 100
             ).label("filter_ratio")
    ]).select_from(
        join(base_query, powa_statements,
             and_(powa_statements.c.queryid == literal_column("pqnh.queryid"),
                  powa_statements.c.srvid == literal_column("pqnh.srvid")),
             powa_statements.c.srvid == column("srvid")))
        .group_by(powa_statements.c.srvid, column("qualid"),
                  powa_statements.c.queryid, powa_statements.c.dbid,
                  powa_statements.c.query, column("quals")))

def exten(cls):
    return (
        select([Extension.exten])
        .where(Extension.type == 'queue')
        .where(Extension.typeval == cast(cls.id, String))
        .as_scalar()
    )

def _parse_where_part(part: str) -> ColumnElement:
    """Parse a string into a where condition.

    Currently only equality is supported.

    Parameters
    ----------
    part : str
        ``attribute = value`` string.

    Returns
    -------
    ColumnElement
        A sqlalchemy where condition matching the given part.

    Raises
    ------
    ValueError
        Raised when an unsupported attribute is passed.
    """
    if '=' not in part:
        raise ValueError('Condition does not contain a value.')
    attribute, value = map(str.strip, part.split('='))
    if attribute in ('id', 'system', 'executable'):
        return getattr(t_executables.c, attribute) == value
    elif attribute == 'arguments_hash':
        return t_executables.c.arguments_hash == bytes.fromhex(value)
    elif attribute == 'arguments':
        return t_executables.c.arguments == cast(value, JSON)
    else:
        raise ValueError('Unsupported attribute.')

def index():
    """
    Home page. If the user is not currently logged in with Github, explain
    what WebhookDB is, and ask them to log in.

    If the user *is* logged in with Github, show them their Github repos,
    and allow them to re-sync repos from Github.
    """
    if current_user.is_anonymous():
        return render_template("home-anonymous.html")
    else:
        replication_url = url_for(
            "replication.pull_request",
            _external=True,
        )
        is_self_hook = (RepositoryHook.url == replication_url)
        repos = (
            db.session.query(Repository, func.sum(cast(is_self_hook, db.Integer)))
            .outerjoin(RepositoryHook, RepositoryHook.repo_id == Repository.id)
            .join(UserRepoAssociation, UserRepoAssociation.repo_id == Repository.id)
            .filter(UserRepoAssociation.user_id == current_user.id)
            .filter(UserRepoAssociation.can_admin == True)
            .group_by(Repository)
            .order_by(
                (Repository.owner_id == current_user.id).desc(),
                func.lower(Repository.owner_login),
                func.lower(Repository.name),
            )
        )
        return render_template("home.html", repos=repos)

def filter_modified(cls, part, col=None):
    if col is None:
        col = cls.last_update
    return sql.and_(col >= sql.func.date_trunc(part, sql.func.now()),
                    col < sql.func.date_trunc(part, sql.func.now())
                        + sql.cast('1 %s' % part, Interval))

def date_in_timezone(date_, timezone):
    """
    Given a naive postgres date object (postgres doesn't have tzd dates), returns a
    timezone-aware timestamp for the start of that date in that timezone.
    E.g. if postgres is in 'America/New_York',

    SET SESSION TIME ZONE 'America/New_York';

    CREATE TABLE tz_trouble (to_date date, timezone text);

    INSERT INTO tz_trouble(to_date, timezone) VALUES
    ('2021-03-10'::date, 'Australia/Sydney'),
    ('2021-03-20'::date, 'Europe/Berlin'),
    ('2021-04-15'::date, 'America/New_York');

    SELECT timezone(timezone, to_date::timestamp) FROM tz_trouble;

    The result is:

              timezone
    ------------------------
     2021-03-09 08:00:00-05
     2021-03-19 19:00:00-04
     2021-04-15 00:00:00-04
    """
    return func.timezone(timezone, cast(date_, DateTime(timezone=False)))

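A minimal sketch of using this expression in a query; the Event model with start_date/timezone columns and the session are hypothetical:

# Hypothetical usage: compare the timezone-aware "start of local day"
# against the current timestamp to find events that have already begun.
started = (
    session.query(Event)
    .filter(date_in_timezone(Event.start_date, Event.timezone) <= func.now())
    .all()
)
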
def tree_stats(request, treedef, tree, parentid):
    tree_table = datamodel.get_table(tree)
    parentid = None if parentid == 'null' else int(parentid)

    node = getattr(models, tree_table.name)
    descendant = aliased(node)
    node_id = getattr(node, node._id)
    descendant_id = getattr(descendant, node._id)
    treedef_col = tree_table.name + "TreeDefID"

    same_tree_p = getattr(descendant, treedef_col) == int(treedef)
    is_descendant_p = sql.and_(
        sql.between(descendant.nodeNumber, node.nodeNumber, node.highestChildNodeNumber),
        same_tree_p)

    target, make_joins = getattr(StatsQuerySpecialization, tree)()
    target_id = getattr(target, target._id)

    direct_count = sql.cast(
        sql.func.sum(sql.case([(sql.and_(target_id != None, descendant_id == node_id), 1)],
                              else_=0)),
        types.Integer)

    all_count = sql.func.count(target_id)

    with models.session_context() as session:
        query = session.query(node_id, direct_count, all_count) \
            .join(descendant, is_descendant_p) \
            .filter(node.ParentID == parentid) \
            .group_by(node_id)

        query = make_joins(request.specify_collection, query, descendant_id)
        results = list(query)

    return HttpResponse(toJson(results), content_type='application/json')

def _build_solves_query(extra_filters=(), admin_view=False):
    # This can return None (unauth) if visibility is set to public
    user = get_current_user()
    # We only set a condition for matching user solves if there is a user and
    # they have an account ID (user mode or in a team in teams mode)
    if user is not None and user.account_id is not None:
        user_solved_cond = Solves.account_id == user.account_id
    else:
        user_solved_cond = false()
    # We have to filter solves to exclude any made after the current freeze
    # time unless we're in an admin view as determined by the caller.
    freeze = get_config("freeze")
    if freeze and not admin_view:
        freeze_cond = Solves.date < unix_time_to_utc(freeze)
    else:
        freeze_cond = true()
    # Finally, we never count solves made by hidden or banned users/teams, even
    # if we are an admin. This is to match the challenge detail API.
    AccountModel = get_model()
    exclude_solves_cond = and_(
        AccountModel.banned == false(),
        AccountModel.hidden == false(),
    )
    # This query counts the number of solves per challenge, as well as the sum
    # of correct solves made by the current user per the condition above (which
    # should probably only be 0 or 1!)
    solves_q = (
        db.session.query(
            Solves.challenge_id,
            sa_func.count(Solves.challenge_id),
            sa_func.sum(cast(user_solved_cond, sa_types.Integer)),
        )
        .join(AccountModel)
        .filter(*extra_filters, freeze_cond, exclude_solves_cond)
        .group_by(Solves.challenge_id)
    )
    return solves_q

def get_invalid_park_positions():
    range_query = (sql.select([features_table.c.var_val])
                   .where(sql.and_(
                       features_table.c.category == PARKING_CATEGORY,
                       features_table.c.var_name == PARKING_TYPE)))

    park_range = op.get_bind().execute(range_query).scalar()
    min_park, max_park = park_range.split('-')

    columns = (phonefunckey_table.c.iduserfeatures,
               phonefunckey_table.c.fknum)

    query = (sql.select(columns)
             .where(sql.and_(
                 phonefunckey_table.c.typevalextenumbers == PARKING_TYPE,
                 sql.not_(
                     sql.cast(phonefunckey_table.c.exten, sa.Integer)
                     .between(int(min_park), int(max_park))))))

    return op.get_bind().execute(query)

def best_matching_flags(sa_engine, case_id, sv_uuid, min_overlap=0.95):
    """Find best matching ``StructuralVariantFlags`` object for the given case and SV.

    Returns ``None`` if none could be found.
    """
    sv = StructuralVariant.objects.get(case_id=case_id, sv_uuid=sv_uuid)
    term_overlap = (
        func.least(StructuralVariantFlags.sa.end, sv.end)
        - func.greatest(StructuralVariantFlags.sa.start, sv.start)
        + 1
    )
    query = (
        select(
            [
                StructuralVariantFlags.sa.sodar_uuid.label("flags_uuid"),
                func.least(
                    cast(term_overlap, Float) / func.greatest((sv.end - sv.start + 1), 1),
                    cast(term_overlap, Float)
                    / func.greatest(
                        (StructuralVariantFlags.sa.end - StructuralVariantFlags.sa.start + 1), 1
                    ),
                ).label("reciprocal_overlap"),
            ]
        )
        .select_from(StructuralVariantFlags.sa)
        .where(
            and_(
                StructuralVariantFlags.sa.case_id == case_id,
                StructuralVariantFlags.sa.release == sv.release,
                StructuralVariantFlags.sa.chromosome == sv.chromosome,
                StructuralVariantFlags.sa.bin.in_(
                    select([column("bin")]).select_from(func.overlapping_bins(sv.start - 1, sv.end))
                ),
                StructuralVariantFlags.sa.end >= sv.start,
                StructuralVariantFlags.sa.start <= sv.end,
                StructuralVariantFlags.sa.sv_type == sv.sv_type,
                cast(term_overlap, Float) / func.greatest((sv.end - sv.start + 1), 1) > min_overlap,
                cast(term_overlap, Float)
                / func.greatest(
                    (StructuralVariantFlags.sa.end - StructuralVariantFlags.sa.start + 1), 1
                )
                > min_overlap,
            )
        )
    )
    return sa_engine.execute(query.order_by(query.c.reciprocal_overlap.desc()))

def __init__(self, level, id, label, metadata=None):
    self.level = level
    self.id = id
    self.label = label
    self.label_expression = cast(_literal_as_binds(self.label), types.Unicode)
    self.id_expr = _literal_as_binds(self.id)
    self.metadata = metadata or MetaData()

def __init__(self, itemtype, itemcount):
    self.itemtype = itemtype
    self.itemcount = itemcount
    self.query = select(
        [cast(func.sum(Item.itemcount), Integer)],
        and_(Item.roleid == bindparam("roleid", type_=Integer),
             Item.itemtype == itemtype),
    )
    self.what = intern(":".join([self.__class__.__name__, str(itemtype), str(itemcount)]))

def col(name, table):
    try:
        return colnamemaps[table][name]
    except KeyError:
        if cast_nulls:
            return sql.cast(sql.null(), types[name]).label(name)
        else:
            return sql.type_coerce(sql.null(), types[name]).label(name)

def __call__(self, column_clause, cuboid=None):
    if cuboid and cuboid.fact_count_column is not None:
        count = func.sum(cuboid.fact_count_column)
        return case([(count == 0, 0)],
                    else_=(func.sum(column_clause * cuboid.fact_count_column) /
                           cast(count, types.Numeric)))
    return func.avg(column_clause)

def within_distance_func(cls, position, distance):
    """
    Creates the geoalchemy function that determines if a point is in
    range < distance from the position item

    :param position: the position to check distance with
    :param distance: the maximum distance in meters
    :return: function to apply to query
    """
    point = func.ST_GeomFromText('POINT({0} {1})'.format(*position),
                                 srid=Position.SRS_WGS_84)
    return func.ST_DWithin(cast(Position.location, Geography(srid=Position.SRS_WGS_84)),
                           point, distance)

def filter_expiration(cls, today=None, timez='UTC'):
    """Select items for which the expiration date has not been reached"""
    if today is None:
        today = datetime.today()
    return sql.func.coalesce(
        cls.expiration,
        sql.cast('infinity', DateTime(timezone=timez))
    ) >= today

def allTransactionMonths():
    session = requireSession()
    parts = "year", "month"
    query = session \
        .query(*[cast(date_part(p, M.Transaction.date), INT).label(p) for p in parts]) \
        .order_by(*[asc(p) for p in parts]) \
        .distinct()
    return list(query)

def _add_acls(name, acls):
    acl = sql.cast(acls, ARRAY(VARCHAR))
    query = (accesswebservice
             .update()
             .values(acl=sql.func.array_cat(accesswebservice.c.acl, acl))
             .where(accesswebservice.c.name == name))
    op.execute(query)

def filter_modified(cls, part, col=None):
    if col is None:
        col = cls.last_update
    return sql.and_(
        col >= sql.func.date_trunc(part, sql.func.now()),
        col < sql.func.date_trunc(part, sql.func.now()) + sql.cast('1 {}'.format(part), Interval)
    )

def filter_effective(cls, today=None, timez='UTC'):
    """Select items for which the effective date has been reached"""
    if not today:
        today = datetime.today()
    return sql.func.coalesce(
        cls.effective,
        sql.cast('-infinity', DateTime(timezone=timez))
    ) <= today

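A short sketch combining this filter with the companion filter_expiration above to select currently-valid rows, assuming both are classmethods on the model; Notice and session are hypothetical names:

# Hypothetical usage: rows whose effective date has passed and whose
# expiration date has not; NULL bounds fall back to -infinity/infinity
# via coalesce, so open-ended rows always match.
now = datetime.today()
active = (
    session.query(Notice)
    .filter(Notice.filter_effective(today=now))
    .filter(Notice.filter_expiration(today=now))
    .all()
)
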
def stat_sla(page, rows, offset, sidx, sord, date_filter, queues_filter, type):
    # Service Level, connect or abandon (count connect time / 30 s)
    o = sql.cast(Queue_log.data1 if type == 'CONNECT' else Queue_log.data3,
                 types.INT) / 30
    q = DBSession.query(func.count('*').label('count'), (o).label('qwait')). \
        filter(Queue_log.queue_event_id == Queue_event.qe_id). \
        filter(Queue_event.event == type). \
        filter(queues_filter). \
        group_by(o).order_by(o)

    if date_filter is not None:
        q = q.filter(date_filter)

    if db_engine == 'oracle':
        # Oracle does not seem to support group by
        # "sql.cast(Queue_log.dataX, types.INT)/30", need to do it here
        total = 1
        data2 = []
        cur = -1
        for i, r in enumerate(q.all()):
            if cur != -1 and r.qwait == data2[cur][1]:
                data2[cur][0] += r.count
            else:
                data2.append([r.count, r.qwait])
                cur += 1
        total_connect = 0
        data = []
        for i, r in enumerate(data2):
            total_connect += r[0]
            label = u'< %dm' % ((1 + r[1]) / 2) if i % 2 \
                else u'< %dm30s' % ((1 + r[1]) / 2)
            data.append({'id': i, 'cell': [label, r[0], 0, 0]})
    else:
        # PostgreSql
        q = q.offset(offset).limit(rows)
        total = q.count() / rows + 1
        total_connect = 0
        data = []
        for i, r in enumerate(q.all()):
            total_connect += r.count
            label = u'< %dm' % ((1 + r.qwait) / 2) if i % 2 \
                else u'< %dm30s' % ((1 + r.qwait) / 2)
            data.append({'id': i, 'cell': [label, r.count, 0, 0]})

    sum_connect = 0.0
    for x in data:
        pc = 100.0 * x['cell'][1] / total_connect
        sum_connect += pc
        x['cell'][2] = '%.1f %%' % pc
        x['cell'][3] = '%.1f %%' % sum_connect

    return dict(page=page, total=total, rows=data)

def search_location():
    latitude = request.args['latitude']
    longitude = request.args['longitude']
    radius = int(request.args['radius'])
    primary = True if (request.args['primary'] == 'primary') else False
    page = int(request.args.get('page', '1'))
    format = request.args.get('format', 'html')

    radius_m = radius * 1000
    wkt = "POINT(%s %s)" % (longitude, latitude)
    location = WKTSpatialElement(wkt)

    loc = sql.cast(LocationBase.location, Geography)
    q_loc = sql.cast(location, Geography)

    query = CadorsReport.query.join(LocationRef).join(LocationBase).filter(
        functions.within_distance(loc, q_loc, radius_m))

    if primary:
        query = query.filter(LocationRef.primary == True)

    if format == 'html':
        query = query.add_column(functions.distance(loc, q_loc).label('distance'))
        query = query.add_column(
            func.ST_Azimuth(location, LocationBase.location.RAW) * (180 / func.pi()))
        query = query.add_column(LocationBase.name)
        query = query.order_by('distance ASC', CadorsReport.timestamp.desc())
        pagination = query.paginate(page)
        response = make_response(
            render_template('sr_loc.html',
                            reports=pagination.items,
                            pagination=pagination,
                            get_direction=get_direction,
                            radius=radius,
                            latitude=latitude,
                            longitude=longitude))
        return prepare_response(response, 300)
    else:
        pagination = query.paginate(page)
        title = "Events within %s km of %s, %s" % (radius, latitude, longitude)
        return render_list(pagination, title, format)

def create_view(metadata, molecule_design_pool_tbl, stock_sample_tbl,
                sample_tbl, container_tbl):
    """
    stock_info_view factory.
    """
    mdp = molecule_design_pool_tbl
    ss = stock_sample_tbl
    c = container_tbl
    s = sample_tbl
    stock = (
        select(
            [
                (literal("mdp")
                 + cast(mdp.c.molecule_design_set_id, String)
                 + literal("c")
                 + cast(coalesce(ss.c.concentration * 1e6, 0), String)
                 ).label("stock_info_id"),
                mdp.c.molecule_design_set_id,
                # We need to set the label explicitly here because
                # mdp.c.molecule_type_id is really mdp.c.molecule_type.
                mdp.c.molecule_type_id.label("molecule_type_id"),
                # pylint: disable=E1101
                coalesce(ss.c.concentration, 0).label("concentration"),
                coalesce(func.count(c.c.container_id), 0).label("total_tubes"),
                coalesce(func.sum(s.c.volume), 0).label("total_volume"),
                coalesce(func.min(s.c.volume), 0).label("minimum_volume"),
                coalesce(func.max(s.c.volume), 0).label("maximum_volume")
                # pylint: enable=E1101
            ],
            from_obj=mdp
                .outerjoin(ss, ss.c.molecule_design_set_id == mdp.c.molecule_design_set_id)
                .outerjoin(s, s.c.sample_id == ss.c.sample_id)
                .outerjoin(c, and_(c.c.container_id == s.c.container_id,
                                   c.c.item_status == _STOCK_CONTAINER_ITEM_STATUS)),
        )
        .group_by(mdp.c.molecule_design_set_id, ss.c.concentration)
        .alias("ssi")
    )
    fkey_mds = ForeignKey(mdp.c.molecule_design_set_id)
    fkey_mds.parent = stock.c.molecule_design_set_id
    stock.c.molecule_design_set_id.foreign_keys.add(fkey_mds)
    fkey_mt = ForeignKey(mdp.c.molecule_type_id)
    fkey_mt.parent = stock.c.molecule_type_id
    stock.c.molecule_type_id.foreign_keys.add(fkey_mt)
    return view_factory(VIEW_NAME, metadata, stock)

def __init__(self, name='All', label='All', metadata=None):
    self.label = label
    self.name = name
    self.label_expression = cast(_literal_as_binds(self.label), types.Unicode)
    self.parent_level = None
    self.metadata = metadata or MetaData()
    self.column = None
    self._level_key = name
    self._level_label_key = label

def _queries(self, clean_inputs):
    # Provides a list of iterators over the required queries, filtered
    # appropriately, and ensures each row is emitted with the proper
    # formatting: ((key), {row})
    key_column_names = map(lambda a: a[0], self._keys)
    entity = EntityProxy(self._entity, self._column_transforms(), clean_inputs)
    queries = []

    # Create a query object for each set of report filters
    query_filters_by_columns = self._query_filters
    table_wide_filters = query_filters_by_columns.pop(None, [])

    # Ensure we do a query even if we have no non-key columns (odd but possible)
    query_filters_by_columns = query_filters_by_columns.items() or [([], [])]

    for column_names, query_filters in query_filters_by_columns:
        # Column names need to be a list to guarantee consistent ordering
        filter_column_names = key_column_names + list(column_names)
        query_columns = []
        query_modifiers = []
        query_group_bys = []

        # Collect the columns, modifiers, and group-bys
        for name in filter_column_names:
            column = self._columns_dict[name]
            col = column.get_query_column(entity)
            if column.cast_to:
                col = cast(col, column.cast_to)
            query_columns.append(col)
            query_modifiers += column.get_query_modifiers(entity)
            query_group_bys += column.get_query_group_bys(entity)

        # Construct the query
        q = elixir.session.query(*query_columns)

        for query_filter in itertools.chain(table_wide_filters, query_filters):
            query_modifiers += query_filter.get_query_modifiers(entity, clean_inputs)
            filter_arg = query_filter.get_filter(entity, clean_inputs)
            if filter_arg is not None:
                q = q.filter(filter_arg)

        for query_modifier in query_modifiers:
            q = query_modifier(q)

        q = q.order_by(*query_group_bys)
        q = q.group_by(*query_group_bys)

        # Set up iteration over the query, with formatted rows
        # (using generator here to make a closure for filter_column_names)
        def rows(q, filter_column_names):
            for row in q.yield_per(QUERY_LIMIT):
                yield dict(zip(filter_column_names, row))

        queries.append(itertools.imap(
            lambda row: (tuple(row[name] for name, _ in self._keys), row),
            rows(q, filter_column_names)
        ))

    return queries

def _filter(self, query, term=None):
    if not term:
        return query
    criteria = []
    for column in self.config.all_search_columns():
        expression = sql.cast(column, sa.String).ilike('%%%s%%' % term)
        criteria.append(expression)
    query = query.filter(sql.or_(*criteria))
    return query

def rainfall_query(gage_name, beginDate=None, endDate=None):
    "Rainfall data for the given gage. Timestamps are cast to DATETIME for ease of comparison"
    sel = select([cast(rainfall.c.date, DateTime).label("datetime")]).order_by(rainfall.c.date)
    sel = sel.column(rainfall.c[rainfall_column_name(gage_name)])
    if beginDate is not None:
        sel = sel.where(rainfall.c.date >= beginDate)
    if endDate is not None:
        sel = sel.where(rainfall.c.date <= endDate)
    return sel

def find_by_incall_id(session, incall_id):
    row = (session.query(UserFeatures.uuid.label('xivo_user_uuid'),
                         LineFeatures.context.label('profile'))
           .filter(Dialaction.category == 'incall',
                   Dialaction.categoryval == str(incall_id),
                   Dialaction.action == 'user',
                   UserFeatures.id == cast(Dialaction.actionarg1, Integer),
                   UserLine.user_id == UserFeatures.id,
                   UserLine.line_id == LineFeatures.id,
                   UserLine.main_line == True,  # noqa
                   UserLine.main_user == True,  # noqa
                   )).first()
    return row

def weighted_ilike(self, value, weight=1):
    """
    Calls the ILIKE operator and returns either 0 or the given weight.
    """
    # Make sure weight is numeric and we can safely
    # pass it to the literal_column()
    assert isinstance(weight, (int, float))

    # Convert weight to a literal_column()
    weight = literal_column(str(weight))

    # Return ilike expression
    return cast(and_(self != None, self.ilike(value)), Integer) * weight

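A brief sketch of ranking search results with this comparator method, assuming it is installed on the model's column types; the Article model, its title/body columns, and the session are hypothetical:

# Hypothetical usage: title matches count three times as much as body
# matches; the summed 0-or-weight scores order results by relevance.
score = (Article.title.weighted_ilike('%sqlalchemy%', 3) +
         Article.body.weighted_ilike('%sqlalchemy%', 1))
results = session.query(Article).order_by(score.desc()).all()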