def render_GET(self, request):
    articles = request.dbsession\
        .query(Article)\
        .filter(Article.enabled)\
        .order_by(Article.last_modified.desc(), Article.uid.desc())\
        .options(subqueryload(Article.user))[0:NEW_ITEM_COUNT]
    replies = request.dbsession\
        .query(Reply)\
        .filter(Reply.enabled)\
        .order_by(Reply.last_modified.desc(), Reply.uid.desc())\
        .options(subqueryload(Reply.user))[0:NEW_ITEM_COUNT]
    articles_packed = [pack_article(article) for article in articles]
    replies_packed = [pack_reply(reply) for reply in replies]

    items = list()
    items.extend(articles_packed)
    items.extend(replies_packed)
    items = sorted(items,
                   key=lambda i: (i["last_modified"], i["uid"]),
                   reverse=True)[0:NEW_ITEM_COUNT]

    page = request.get_argument_int("page", 1)
    # Floor division keeps page_total an int under Python 3 as well.
    page_total = len(items) // ARTICLE_PER_PAGE
    if len(items) % ARTICLE_PER_PAGE != 0:
        page_total += 1
    start_idx = ARTICLE_PER_PAGE * (page - 1)
    end_idx = start_idx + ARTICLE_PER_PAGE
    items = items[start_idx:end_idx]

    context = {
        "items": items,
        "page": page,
        "page_total": page_total,
    }
    return render_template("new.html", request, context)
def render_params_for_user_tests(self, query, page, page_size=50):
    """Add data about the requested user tests to r_params.

    query (sqlalchemy.orm.query.Query): the query giving back all
        interesting user tests.
    page (int): the index of the page to display.
    page_size (int): the number of user tests per page.

    """
    query = query\
        .options(subqueryload(UserTest.task))\
        .options(subqueryload(UserTest.participation))\
        .options(subqueryload(UserTest.files))\
        .options(subqueryload(UserTest.results))\
        .order_by(UserTest.timestamp.desc())

    offset = page * page_size
    count = query.count()

    if self.r_params is None:
        self.r_params = self.render_params()
    self.r_params["user_test_count"] = count
    self.r_params["user_tests"] = \
        query.slice(offset, offset + page_size).all()
    self.r_params["user_test_page"] = page
    self.r_params["user_test_pages"] = \
        (count + page_size - 1) // page_size
def _eager_loaded_annotations(session):
    return session.query(models.Annotation).options(
        subqueryload(models.Annotation.document).subqueryload(
            models.Document.document_uris),
        subqueryload(models.Annotation.document).subqueryload(
            models.Document.meta),
        subqueryload(models.Annotation.moderation),
        subqueryload(models.Annotation.thread).load_only("id"),
    )
def test_subqueryload(self):
    self.assertEqual(
        str(self.db.query(Foo).subqueryload('bars')),
        str(self.db.query(Foo).options(orm.subqueryload('bars')))
    )

    self.assertEqual(
        str(self.db.query(Foo).subqueryload('bars', 'bazs')),
        str(self.db.query(Foo)
            .options(orm.subqueryload('bars').subqueryload('bazs')))
    )

    self.assertEqual(
        str(self.db.query(Foo).subqueryload(Foo.bars)),
        str(self.db.query(Foo).options(orm.subqueryload(Foo.bars)))
    )

    self.assertEqual(
        str(self.db.query(Foo).subqueryload(Foo.bars, Bar.bazs)),
        str(self.db.query(Foo)
            .options(orm.subqueryload(Foo.bars).subqueryload(Bar.bazs)))
    )

    self.assertEqual(
        str(self.db.query(Foo)
            .subqueryload('bars',
                          options=[LoadOption('subqueryload', 'bazs')])),
        str(self.db.query(Foo)
            .options(orm.subqueryload('bars').subqueryload('bazs')))
    )
def render_params_for_submissions(self, query, page, page_size=50):
    """Add data about the requested submissions to r_params.

    query (sqlalchemy.orm.query.Query): the query giving back all
        interesting submissions.
    page (int): the index of the page to display.
    page_size (int): the number of submissions per page.

    """
    query = query\
        .options(subqueryload(Submission.task))\
        .options(subqueryload(Submission.participation))\
        .options(subqueryload(Submission.files))\
        .options(subqueryload(Submission.token))\
        .options(subqueryload(Submission.results)
                 .subqueryload(SubmissionResult.evaluations))\
        .order_by(Submission.timestamp.desc())

    offset = page * page_size
    count = query.count()

    if self.r_params is None:
        self.r_params = self.render_params()
    # A page showing paginated submissions can use these parameters:
    # total number of submissions, submissions to display in this page,
    # index of the current page, total number of pages.
    self.r_params["submission_count"] = count
    self.r_params["submissions"] = \
        query.slice(offset, offset + page_size).all()
    self.r_params["submission_page"] = page
    self.r_params["submission_pages"] = \
        (count + page_size - 1) // page_size
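Both pagination helpers above derive the page count with the same ceiling-division idiom, (count + page_size - 1) // page_size. A minimal standalone sketch of that arithmetic (the function name here is illustrative, not part of the original handlers):

def page_count(count, page_size=50):
    # Ceiling division without floats: 0 items -> 0 pages,
    # 50 items -> 1 page, 51 items -> 2 pages.
    return (count + page_size - 1) // page_size

assert page_count(0) == 0
assert page_count(50) == 1
assert page_count(51) == 2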
def eager_query(cls): """Eager Query""" query = super(Roleable, cls).eager_query() return cls.eager_inclusions( query, Roleable._include_links ).options( orm.subqueryload( '_access_control_list' ).joinedload( "person" ).undefer_group( 'Person_complete' ), orm.subqueryload( '_access_control_list' ).joinedload( "person" ).subqueryload( "contexts" ).undefer_group( 'Context_complete' ), orm.subqueryload( '_access_control_list' ).joinedload( "ac_role" ).undefer_group( 'AccessControlRole_complete' ), )
def render(self, session, **arguments):
    q = session.query(Chassis)

    q = q.options(subqueryload('model'),
                  joinedload('model.machine_specs'),
                  subqueryload('location'),
                  joinedload('slots'),
                  subqueryload('slots.machine'),
                  # A rare case when we don't need primary name/host
                  lazyload('slots.machine.primary_name'),
                  lazyload('slots.machine.host'),
                  subqueryload('interfaces'),
                  joinedload('interfaces.assignments'),
                  joinedload('interfaces.assignments.network'),
                  joinedload('interfaces.assignments.dns_records'))

    # Prefer the primary name for ordering
    q = q.outerjoin(DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                    DnsDomain)
    q = q.options(contains_eager('primary_name'),
                  contains_eager('primary_name.fqdn'),
                  contains_eager('primary_name.fqdn.dns_domain'))
    q = q.order_by(Fqdn.name, DnsDomain.name, Chassis.label)

    return q.all()
def render(self, session, **arguments):
    q = session.query(Switch)

    q = q.options(subqueryload('location'),
                  subqueryload('interfaces'),
                  joinedload('interfaces.assignments'),
                  joinedload('interfaces.assignments.dns_records'),
                  joinedload('interfaces.assignments.network'),
                  subqueryload('observed_macs'),
                  undefer('observed_macs.creation_date'),
                  subqueryload('observed_vlans'),
                  undefer('observed_vlans.creation_date'),
                  joinedload('observed_vlans.network'),
                  subqueryload('model'),
                  # Switches don't have machine specs, but the formatter
                  # checks for their existence anyway
                  joinedload('model.machine_specs'))

    # Prefer the primary name for ordering
    q = q.outerjoin(DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                    DnsDomain)
    q = q.options(contains_eager('primary_name'),
                  contains_eager('primary_name.fqdn'),
                  contains_eager('primary_name.fqdn.dns_domain'))
    q = q.reset_joinpoint()
    q = q.order_by(Fqdn.name, DnsDomain.name, Switch.label)

    return q.all()
def eager_query(cls):
    from sqlalchemy import orm
    query = super(Relatable, cls).eager_query()
    return cls.eager_inclusions(query, Relatable._include_links).options(
        orm.subqueryload('related_sources'),
        orm.subqueryload('related_destinations'))
def _clone_contribs(self, new_event):
    attrs = (get_simple_column_attrs(Contribution) |
             {'own_room', 'own_venue'}) - {'abstract_id'}
    query = (Contribution.query.with_parent(self.old_event)
             .options(undefer('_last_friendly_subcontribution_id'),
                      joinedload('own_venue'),
                      joinedload('own_room').lazyload('*'),
                      joinedload('session'),
                      joinedload('session_block').lazyload('session'),
                      joinedload('type'),
                      subqueryload('acl_entries'),
                      subqueryload('subcontributions').joinedload('references'),
                      subqueryload('references'),
                      subqueryload('person_links'),
                      subqueryload('field_values')))
    for old_contrib in query:
        contrib = Contribution()
        contrib.populate_from_attrs(old_contrib, attrs)
        contrib.subcontributions = list(
            self._clone_subcontribs(old_contrib.subcontributions))
        contrib.acl_entries = clone_principals(ContributionPrincipal,
                                               old_contrib.acl_entries)
        contrib.references = list(
            self._clone_references(ContributionReference,
                                   old_contrib.references))
        contrib.person_links = list(
            self._clone_person_links(ContributionPersonLink,
                                     old_contrib.person_links))
        contrib.field_values = list(
            self._clone_fields(old_contrib.field_values))
        if old_contrib.type is not None:
            contrib.type = self._contrib_type_map[old_contrib.type]
        if old_contrib.session is not None:
            contrib.session = self._session_map[old_contrib.session]
        if old_contrib.session_block is not None:
            contrib.session_block = \
                self._session_block_map[old_contrib.session_block]
        new_event.contributions.append(contrib)
        self._contrib_map[old_contrib] = contrib
def render(self, session, cluster, **arguments):
    q = session.query(Cluster)

    vm_q = session.query(VirtualMachine)
    vm_q = vm_q.join(ClusterResource, Cluster)

    if cluster:
        q = q.filter_by(name=cluster)
        vm_q = vm_q.filter_by(name=cluster)

    vm_q = vm_q.options(joinedload('machine'),
                        joinedload('machine.primary_name'),
                        joinedload('machine.primary_name.fqdn'),
                        lazyload('machine.host'))

    q = q.options(subqueryload('_hosts'),
                  joinedload('_hosts.host'),
                  joinedload('_hosts.host.machine'),
                  subqueryload('_metacluster'),
                  joinedload('_metacluster.metacluster'),
                  joinedload('resholder'),
                  subqueryload('resholder.resources'),
                  subqueryload('service_bindings'),
                  subqueryload('allowed_personalities'))
    q = q.order_by(Cluster.name)
    dbclusters = q.all()
    if cluster and not dbclusters:
        raise NotFoundException("Cluster %s not found." % cluster)

    # Manual eager-loading of VM resources. All this code does is make
    # sure the data is pinned in the session's cache.
    machines = {}
    for vm in vm_q:
        machines[vm.machine.machine_id] = vm

    return ClusterList(dbclusters)
def group(grp):
    db = current_app.config['DB']()
    collections = list(queries.collections(db))

    group = db.query(tables.Group).get(grp)
    if group is None:
        abort(404)

    query = db.query(tables.Package)
    query = query.join(tables.Package.group_packages)
    query = query.join(tables.GroupPackage.group)
    query = query.join(tables.Package.status_obj)
    query = query.filter(tables.Group.ident == grp)
    query = query.order_by(-tables.Status.weight)
    query = queries.order_by_name(db, query)
    query = query.options(subqueryload('collection_packages'))
    query = query.options(subqueryload('collection_packages.links'))
    packages = list(query)

    query = query.filter(tables.GroupPackage.is_seed)
    seed_groups = query

    return render_template(
        'group.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('group', grp=grp), group.name),
        ),
        collections=collections,
        grp=group,
        packages=packages,
        len_packages=len(packages),
        deptree=list(gen_deptree(seed_groups)),
        status_counts=get_status_counts(packages),
    )
def finalize_query(self, query, fltr, session, qstring=None, order_by=None):
    ranked = False

    if qstring is not None:
        ft_query = and_(SearchObjectIndex.so_uuid == ObjectInfoIndex.uuid,
                        query)
        q = session.query(
            ObjectInfoIndex,
            func.ts_rank_cd(
                SearchObjectIndex.search_vector,
                func.plainto_tsquery(qstring)
            ).label('rank'))\
            .options(subqueryload(ObjectInfoIndex.search_object))\
            .options(subqueryload(ObjectInfoIndex.properties))\
            .filter(ft_query)
        query_result = search(q, qstring,
                              vector=SearchObjectIndex.search_vector,
                              sort=order_by is None,
                              regconfig='simple')
        ranked = True
    else:
        query_result = session.query(ObjectInfoIndex)\
            .options(subqueryload(ObjectInfoIndex.properties))\
            .filter(query)

    if order_by is not None:
        query_result = query_result.order_by(order_by)
    elif ranked is True:
        # Order by full-text rank. The original referenced an
        # uninitialized `search_query` variable here; the rank has to be
        # computed from the query string itself.
        query_result = query_result.order_by(
            desc(
                func.ts_rank_cd(
                    SearchObjectIndex.search_vector,
                    func.to_tsquery(qstring)
                )
            )
        )

    if 'limit' in fltr:
        query_result = query_result.limit(fltr['limit'])

    return query_result, ranked
def lookup(self, department_id, start_time=None, end_time=None):
    """
    Returns a list of all shifts for the given department.

    Takes the department id as the first parameter. For a list of all
    department ids call the "dept.list" method.

    Optionally takes a "start_time" and "end_time" to constrain the
    results to a given date range. Dates may be given in any format
    supported by the
    <a href="http://dateutil.readthedocs.io/en/stable/parser.html">dateutil
    parser</a>, plus the string "now".

    Unless otherwise specified, "start_time" and "end_time" are assumed
    to be in the local timezone of the event.
    """
    with Session() as session:
        query = session.query(Job).filter_by(department_id=department_id)
        if start_time:
            start_time = _parse_datetime(start_time)
            query = query.filter(Job.start_time >= start_time)
        if end_time:
            end_time = _parse_datetime(end_time)
            query = query.filter(Job.start_time <= end_time)
        query = query.options(
            subqueryload(Job.department),
            subqueryload(Job.shifts).subqueryload(Shift.attendee))
        return [job.to_dict(self.fields) for job in query]
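If called directly, the method above might be exercised like this; a sketch assuming `api` is an instance of the service class, with a placeholder department id and dates (any dateutil-parsable string, or "now", works):

# Placeholder arguments for illustration only.
shifts = api.lookup('arcade', start_time='2018-08-09', end_time='now')
for shift in shifts:
    print(shift)  # dict shape depends on self.fields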
def eager_query(cls):
    from sqlalchemy import orm
    query = super(ControlControl, cls).eager_query()
    return query.options(
        orm.subqueryload('control'),
        orm.subqueryload('implemented_control'))
def get_overcloud_by_id(self, overcloud_id):
    """Returns a specific overcloud instance.

    :return: overcloud if one exists with the given ID
    :rtype: tuskar.db.sqlalchemy.models.Overcloud

    :raises: tuskar.common.exception.OvercloudNotFound:
        if no overcloud with the given ID exists
    """
    session = get_session()

    try:
        query = (
            session.query(models.Overcloud).
            options(subqueryload(models.Overcloud.attributes)).
            options(subqueryload(models.Overcloud.counts)).
            options(subqueryload('counts.overcloud_role')).
            filter_by(id=overcloud_id)
        )
        result = query.one()
    except NoResultFound:
        raise exception.OvercloudNotFound()
    finally:
        session.close()

    return result
def eager_query(cls):
    from sqlalchemy import orm
    query = super(UserRole, cls).eager_query()
    return query.options(
        orm.subqueryload('role'),
        orm.subqueryload('person'))
def eager_query(cls, **kwargs):
    query = super(Snapshot, cls).eager_query(**kwargs)
    return cls.eager_inclusions(query, Snapshot._include_links).options(
        orm.subqueryload('revision'),
        orm.subqueryload('revisions'),
        orm.joinedload('audit').load_only("id", "archived"),
    )
def overview_xlsx(self, out, session):
    checklist = list(DeptChecklistConf.instances.values())
    departments = session.query(Department).options(
        subqueryload(Department.members_who_can_admin_checklist),
        subqueryload(Department.dept_checklist_items)) \
        .order_by(Department.name)

    header_row = ['Department']
    header_row.extend(item.name for item in checklist)
    header_row.extend(['Emails'])
    out.writerow(header_row)

    for dept in departments:
        out.writecell(dept.name)
        for item in checklist:
            checklist_item = dept.checklist_item_for_slug(item.slug)
            if checklist_item:
                out.writecell('', {'bg_color': 'green'})
            elif days_before(7, item.deadline)():
                out.writecell('', {'bg_color': 'orange'})
            elif item.deadline < datetime.now(UTC):
                out.writecell('', {'bg_color': 'red'})
            else:
                out.writecell('')
        out.writecell(', '.join([admin.email
                                 for admin in dept.checklist_admins]),
                      last_cell=True)
def query_factory():
    query = self._session.query(Stop)
    if prefetch_parent:
        query = query.options(subqueryload('parent_station'))
    if prefetch_substops:
        query = query.options(subqueryload('sub_stops'))
    return query
def _attendee_hotel_nights(session):
    query = session.query(Attendee).filter(
        Attendee.assigned_depts.any()
    ).options(
        subqueryload(Attendee.depts_where_working),
        subqueryload(Attendee.shifts)
            .subqueryload(Shift.job)
            .subqueryload(Job.department),
        subqueryload(Attendee.hotel_requests)
    ).order_by(Attendee.full_name, Attendee.id)

    attendee_hotel_nights = []
    for attendee in query:
        if attendee.hotel_requests and attendee.hotel_requests.approved:
            hotel_nights = set(attendee.hotel_requests.nights_ints)
            hotel_shoulder_nights = hotel_nights.difference(c.CORE_NIGHTS)
            hotel_night_dates = sorted(
                map(lambda x: c.NIGHT_DATES[c.NIGHTS[x]], hotel_nights))
        else:
            hotel_nights = set()
            hotel_shoulder_nights = set()
            hotel_night_dates = []

        if hotel_night_dates:
            first_hotel_night = min(hotel_night_dates)
            last_hotel_night = max(hotel_night_dates)
        else:
            first_hotel_night = None
            last_hotel_night = None

        attendee_hotel_nights.append(dict(
            attendee=attendee,
            hotel_nights=hotel_nights,
            hotel_shoulder_nights=hotel_shoulder_nights,
            hotel_night_dates=hotel_night_dates,
            first_hotel_night=first_hotel_night,
            last_hotel_night=last_hotel_night,
        ))
    return attendee_hotel_nights
def _get_db_tracks(session, chunk):
    return (session.query(Track).
            options(subqueryload(Track.album),
                    subqueryload(Track.artist),
                    subqueryload(Track.lastfm_info)).
            filter(Track.filename.in_(chunk)).
            all())
def json_export(outfile):
    db = Session()
    data = dict()
    data["materials"] = [
        it.to_dict(["locations"])
        for it in db.query(Material).options(subqueryload(Material.locations))
    ]
    data["materialTypes"] = [
        dict(label=it[0], value=it[0])
        for it in db.query(distinct(Material.type)).order_by(Material.type.asc())
        if it[0]
    ]
    data["blueprints"] = [
        it.to_dict(["ingredients"])
        for it in db.query(Blueprint)
            .options(subqueryload(Blueprint.ingredients))
            .options(subqueryload("ingredients.material"))
    ]
    data["blueprintTypes"] = [
        dict(label=it[0], value=it[0])
        for it in db.query(distinct(Blueprint.type)).order_by(Blueprint.type.asc())
        if it[0]
    ]
    with open(outfile, "w") as fp:
        fp.write('CollectorDroneData=')
        json.dump(data, fp)
    db.close()
def family_query(self):
    query = self.request.db.query(tcg_tables.CardFamily)
    query = dbutil.order_by_name(query, tcg_tables.CardFamily)
    query = query.options(joinedload('names_local'))
    query = query.options(subqueryload('cards.prints'))
    query = query.options(subqueryload('cards'))
    return query
def eager_query(cls):
    from sqlalchemy import orm
    query = super(SystemSystem, cls).eager_query()
    return query.options(
        orm.subqueryload('parent'),
        orm.subqueryload('child'))
def index():
    query = Event.query() \
        .options(subqueryload('actor')) \
        .options(subqueryload('user')) \
        .options(subqueryload('club')) \
        .outerjoin(Event.flight) \
        .options(contains_eager(Event.flight)) \
        .filter(or_(Event.flight == None, Flight.is_rankable())) \
        .order_by(Event.time.desc())

    query = _filter_query(query, request.args)

    page = request.args.get('page', type=int, default=1)
    per_page = request.args.get('per_page', type=int, default=50)

    events = query.limit(per_page).offset((page - 1) * per_page).all()
    events_count = len(events)

    if request.args.get('grouped', True, type=str_to_bool):
        events = group_events(events)

    template_vars = dict(events=events, types=Event.Type)

    if page > 1:
        template_vars['prev_page'] = page - 1
    if events_count == per_page:
        template_vars['next_page'] = page + 1

    return render_template('timeline/list.jinja', **template_vars)
def get_queryset(self, project=None, db=None, *args, **kwargs):
    malware_sha256 = self.kwargs["malware_sha256"]

    if malware_sha256:
        session = db.Session()
        malware = session.query(Malware).filter(
            Malware.sha256 == malware_sha256).one_or_none()
        if not malware:
            error = {"error": {
                "code": "NotFound",
                "message": "Malware not found: {} (Project: {})".format(
                    malware_sha256, project)}}
            raise NotFound(detail=error)

        session = db.Session()
        malware = session.query(Malware) \
            .options(subqueryload(Malware.tag)) \
            .options(subqueryload(Malware.analysis)) \
            .options(subqueryload(Malware.note)) \
            .filter(Malware.sha256 == malware_sha256).one_or_none()
        queryset = getattr(malware, self.malware_relationship_field)
    else:
        session = db.Session()
        queryset = session.query(self.model)

    return queryset
def package(pkg):
    db = current_app.config['DB']()
    collections = list(queries.collections(db))

    package = db.query(tables.Package).get(pkg)
    if package is None:
        abort(404)

    query = queries.dependencies(db, package)
    query = query.options(subqueryload('collection_packages'))
    query = query.options(subqueryload('collection_packages.links'))
    dependencies = list(query)

    dependents = list(queries.dependents(db, package))

    in_progress_deps = [p for p in dependencies if p.status == 'in-progress']

    return render_template(
        'package.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('package', pkg=pkg), pkg),
        ),
        collections=collections,
        pkg=package,
        dependencies=list(dependencies),
        dependents=list(dependents),
        deptree=[(package, gen_deptree(dependencies))],
        in_progress_deps=in_progress_deps,
    )
def eager_query(cls):
    from sqlalchemy import orm
    query = super(ControlSection, cls).eager_query()
    return query.options(
        orm.subqueryload('control'),
        orm.subqueryload('section'))
def stop(self, stop_id, feed_id="", prefetch_parent=True,
         prefetch_substops=True):
    query = self._session.query(Stop)
    if prefetch_parent:
        query = query.options(subqueryload('parent_station'))
    if prefetch_substops:
        query = query.options(subqueryload('sub_stops'))
    return query.get((feed_id, stop_id))
def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event',
                           tz=utc, from_categ=None, grouped=True,
                           includible=lambda item: True):
    """Retrieve time blocks that fall within a specific time interval
    for a given set of categories.

    :param categ_ids: iterable containing list of category IDs
    :param start_dt: start of search interval (``datetime``, expected
                     to be in display timezone)
    :param end_dt: end of search interval (``datetime``, expected to
                   be in display timezone)
    :param detail_level: the level of detail of information
                         (``event|session|contribution``)
    :param tz: the ``timezone`` information should be displayed in
    :param from_categ: ``Category`` that will be taken into account to
                       calculate visibility
    :param grouped: whether to group results by start date
    :param includible: a callable, to allow further arbitrary custom
                       filtering (maybe from 3rd party plugins) on whether
                       to include (returns True) or not (returns False)
                       each ``detail`` item.  Default always returns True.
    :returns: a dictionary containing timetable information in a
              structured way. See source code for examples.
    """
    day_start = start_dt.astimezone(utc)
    day_end = end_dt.astimezone(utc)
    dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)

    items = defaultdict(lambda: defaultdict(list))

    # first of all, query TimetableEntries/events that fall within
    # specified range of dates (and category set)
    events = _query_events(categ_ids, day_start, day_end)
    if from_categ:
        events = events.filter(Event.is_visible_in(from_categ.id))
    for eid, tt_start_dt in events:
        if tt_start_dt:
            items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
        else:
            items[eid] = None

    # then, retrieve detailed information about the events
    event_ids = set(items)
    query = (Event.find(Event.id.in_(event_ids))
             .options(subqueryload(Event.person_links)
                      .joinedload(EventPersonLink.person),
                      joinedload(Event.own_room).noload('owner'),
                      joinedload(Event.own_venue),
                      joinedload(Event.category).undefer('effective_icon_data'),
                      undefer('effective_protection_mode')))
    scheduled_events = defaultdict(list)
    ongoing_events = []
    events = []
    for e in query:
        if not includible(e):
            continue
        if grouped:
            local_start_dt = e.start_dt.astimezone(tz).date()
            local_end_dt = e.end_dt.astimezone(tz).date()
            if items[e.id] is None:
                # if there is no TimetableEntry, this means the event
                # has no timetable in that interval
                for day in iterdays(max(start_dt.date(), local_start_dt),
                                    min(end_dt.date(), local_end_dt)):
                    # if the event starts on this date, we've got a time slot
                    if day.date() == local_start_dt:
                        scheduled_events[day.date()].append((e.start_dt, e))
                    else:
                        ongoing_events.append(e)
            else:
                for start_d, start_dts in items[e.id].viewitems():
                    scheduled_events[start_d].append((start_dts[0], e))
        else:
            events.append(e)

    # result['events'][date(...)] -> [(datetime(....), Event(...))]
    # result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]
    # result['ongoing_events'] = [Event(...)]
    if grouped:
        result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    else:
        result = defaultdict(lambda: defaultdict(list))

    result.update({
        'events': scheduled_events if grouped else events,
        'ongoing_events': ongoing_events
    })

    # according to detail level, ask for extra information from the DB
    if detail_level != 'event':
        query = _query_blocks(event_ids, dates_overlap, detail_level)
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.session.event_id]['blocks'][start_date].append(
                    (b.timetable_entry, b))
        else:
            for b in query:
                result[b.session.event_id]['blocks'].append(b)

    if detail_level == 'contribution':
        query = (Contribution.find(Contribution.event_id.in_(event_ids),
                                   dates_overlap(TimetableEntry),
                                   ~Contribution.is_deleted)
                 .options(contains_eager(Contribution.timetable_entry),
                          joinedload(Contribution.person_links))
                 .join(TimetableEntry))
        if grouped:
            for c in query:
                start_date = c.timetable_entry.start_dt.astimezone(tz).date()
                result[c.event_id]['contribs'][start_date].append(
                    (c.timetable_entry, c))
        else:
            for c in query:
                result[c.event_id]['contributions'].append(c)

        query = (Break.find(TimetableEntry.event_id.in_(event_ids),
                            dates_overlap(TimetableEntry))
                 .options(contains_eager(Break.timetable_entry))
                 .join(TimetableEntry))
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.timetable_entry.event_id]['breaks'][start_date].append(
                    (b.timetable_entry, b))
        else:
            for b in query:
                result[b.timetable_entry.event_id]['breaks'].append(b)
    return result
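A sketch of how the function above might be driven from calling code, assuming Indico's models are set up; the category id, timezone, and dates are placeholders:

from datetime import datetime
from pytz import timezone

display_tz = timezone('Europe/Zurich')
start = display_tz.localize(datetime(2017, 3, 1))
end = display_tz.localize(datetime(2017, 3, 8))

info = get_category_timetable([5], start, end,
                              detail_level='session', tz=display_tz)
# With grouped=True (the default), scheduled events are keyed by local date:
for day, slots in info['events'].items():
    for local_start, event in slots:
        print(day, local_start, event.title)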
def test_subquery_eagerloading(self):
    User = self.classes.User
    Address = self.classes.Address
    Order = self.classes.Order

    # Override the default bakery for one with a smaller size. This used
    # to trigger a bug when unbaking subqueries.
    self.bakery = baked.bakery(size=3)

    base_bq = self.bakery(lambda s: s.query(User))

    base_bq += lambda q: q.options(subqueryload(User.addresses),
                                   subqueryload(User.orders))
    base_bq += lambda q: q.order_by(User.id)

    assert_result = [
        User(
            id=7,
            addresses=[Address(id=1, email_address="*****@*****.**")],
            orders=[Order(id=1), Order(id=3), Order(id=5)],
        ),
        User(
            id=8,
            addresses=[
                Address(id=2, email_address="*****@*****.**"),
                Address(id=3, email_address="*****@*****.**"),
                Address(id=4, email_address="*****@*****.**"),
            ],
        ),
        User(
            id=9,
            addresses=[Address(id=5)],
            orders=[Order(id=2), Order(id=4)],
        ),
        User(id=10, addresses=[]),
    ]

    for i in range(4):
        for cond1, cond2 in itertools.product(
                *[(False, True) for j in range(2)]):
            bq = base_bq._clone()

            sess = Session()

            if cond1:
                bq += lambda q: q.filter(User.name == "jack")
            else:
                bq += lambda q: q.filter(User.name.like("%ed%"))

            if cond2:
                ct = func.count(Address.id).label("count")
                subq = (sess.query(ct, Address.user_id)
                        .group_by(Address.user_id)
                        .having(ct > 2)
                        .subquery())
                bq += lambda q: q.join(subq)

            if cond2:
                if cond1:
                    def go():
                        result = bq(sess).all()
                        eq_([], result)
                    self.assert_sql_count(testing.db, go, 1)
                else:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[1:2], result)
                    self.assert_sql_count(testing.db, go, 3)
            else:
                if cond1:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[0:1], result)
                    self.assert_sql_count(testing.db, go, 3)
                else:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[1:3], result)
                    self.assert_sql_count(testing.db, go, 3)

            sess.close()
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, tag, limit,
            offset, view, db_session):
    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    thread_criteria = [Thread.namespace_id == namespace_id]

    if thread_public_id is not None:
        query = query.filter(Thread.public_id == thread_public_id)
        # TODO(emfree): at this point there should be at most one object,
        # so we could just execute the query and check additional filters
        # in Python-land.

    if started_before is not None:
        thread_criteria.append(Thread.subjectdate < started_before)

    if started_after is not None:
        thread_criteria.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        thread_criteria.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        thread_criteria.append(Thread.recentdate > last_message_after)

    if subject is not None:
        thread_criteria.append(Thread.subject == subject)

    thread_predicate = and_(*thread_criteria)
    query = query.filter(thread_predicate)

    if tag is not None:
        tag_query = db_session.query(TagItem).join(Tag). \
            filter(or_(Tag.public_id == tag, Tag.name == tag),
                   Tag.namespace_id == namespace_id).subquery()
        query = query.join(tag_query)

    if any((from_addr, to_addr, cc_addr, bcc_addr)):
        contact_criteria = []
        if from_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == from_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'from_addr'))

        if to_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == to_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'to_addr'))

        if cc_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == cc_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'cc_addr'))

        if bcc_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == bcc_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'bcc_addr'))

        contact_query = db_session.query(Message). \
            join(MessageContactAssociation).join(Contact). \
            filter(or_(*contact_criteria)).subquery()

        query = query.join(contact_query)

    if any_email is not None:
        any_contact_query = db_session.query(Message). \
            join(MessageContactAssociation).join(Contact). \
            filter(Contact.email_address == any_email,
                   Contact.namespace_id == namespace_id).subquery()
        query = query.join(any_contact_query)

    if filename is not None:
        files_query = db_session.query(Message). \
            join(Part).join(Block). \
            filter(Block.filename == filename). \
            subquery()
        query = query.join(files_query)

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        query = query.options(
            subqueryload(Thread.messages).load_only(
                'public_id', 'is_draft', 'from_addr', 'to_addr',
                'cc_addr', 'bcc_addr'),
            subqueryload('tagitems').joinedload('tag').load_only(
                'public_id', 'name'))

    query = query.order_by(desc(Thread.recentdate)).distinct().limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
def events(namespace_id, event_public_id, calendar_public_id, title,
           description, location, starts_before, starts_after, ends_before,
           ends_after, source, limit, offset, view, db_session):
    if view == 'count':
        query = db_session.query(func.count(Event.id))
    elif view == 'ids':
        query = db_session.query(Event.public_id)
    else:
        query = db_session.query(Event)

    query = query.filter(Event.namespace_id == namespace_id)
    event_criteria = []

    if event_public_id:
        query = query.filter(Event.public_id == event_public_id)

    if starts_before is not None:
        event_criteria.append(Event.start < starts_before)

    if starts_after is not None:
        event_criteria.append(Event.start > starts_after)

    if ends_before is not None:
        event_criteria.append(Event.end < ends_before)

    if ends_after is not None:
        event_criteria.append(Event.end > ends_after)

    event_predicate = and_(*event_criteria)
    query = query.filter(event_predicate)

    if calendar_public_id is not None:
        query = query.join(Calendar). \
            filter(Calendar.public_id == calendar_public_id,
                   Calendar.namespace_id == namespace_id)

    if title is not None:
        query = query.filter(Event.title.like('%{}%'.format(title)))

    if description is not None:
        query = query.filter(Event.description.like(
            '%{}%'.format(description)))

    if location is not None:
        query = query.filter(Event.location.like('%{}%'.format(location)))

    if source is not None:
        query = query.filter(Event.source == source)

    if view == 'count':
        return {"count": query.one()[0]}

    query = query.order_by(asc(Event.start)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]
    else:
        # Eager-load some objects in order to make constructing API
        # representations faster.
        query = query.options(subqueryload(Event.participants_by_email))
        return query.all()
def go():
    sess = Session()
    sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
    sess.close()
def subscription_get_all_subscriber_ids(resource, resource_id, session=None):
    '''Test subscription discovery. The tested algorithm is as follows:

    If you're subscribed to a project_group, you will be notified about
    project_group, project, story, and task changes.

    If you are subscribed to a project, you will be notified about project,
    story, and task changes.

    If you are subscribed to a task, you will be notified about changes to
    that task.

    If you are subscribed to a story, you will be notified about changes to
    that story and its tasks.

    :param resource: The name of the resource.
    :param resource_id: The ID of the resource.
    :return: A set of user ids.
    '''

    affected = {
        'project_group': set(),
        'project': set(),
        'story': set(),
        'task': set(),
        'worklist': set()
    }

    # If we accidentally pass a timeline_event, we're actually going to
    # treat it as a story.
    if resource == 'timeline_event':
        event = api_base.entity_get(TimeLineEvent, resource_id,
                                    session=session)
        if event:
            if event.story_id is not None:
                resource = 'story'
                resource_id = event.story_id
            elif event.worklist_id is not None:
                resource = 'worklist'
                resource_id = event.worklist_id
            else:
                return set()
        else:
            return set()

    # Sanity check exit.
    if resource not in affected.keys():
        return set()

    # Make sure the requested resource is going to be handled.
    affected[resource].add(resource_id)

    users = None

    # Resolve either from story->task or from task->story, so the root
    # resource id remains pristine.
    if resource == 'story':
        # If the story is private, make a whitelist of users to notify.
        story = api_base.model_query(models.Story, session) \
            .options(subqueryload(models.Story.permissions)) \
            .filter_by(id=resource_id).first()

        if story.private:
            users = [user.id for user in story.permissions[0].users]

        # Get this story's tasks
        query = api_base.model_query(models.Task.id, session=session) \
            .filter(models.Task.story_id.in_(affected['story']))

        affected['task'] = affected['task'] \
            .union(r for (r,) in query.all())
    elif resource == 'task':
        # Get this task's story
        query = api_base.model_query(models.Task.story_id, session=session) \
            .filter(models.Task.id == resource_id)

        affected['story'].add(query.first().story_id)

        story = api_base.model_query(models.Story, session) \
            .options(subqueryload(models.Story.permissions)) \
            .filter_by(id=query.first().story_id).first()

        if story.private:
            users = [user.id for user in story.permissions[0].users]

    # If there are tasks, there will also be projects.
    if affected['task']:
        # Get all the tasks' projects
        query = api_base.model_query(distinct(models.Task.project_id),
                                     session=session) \
            .filter(models.Task.id.in_(affected['task']))

        affected['project'] = affected['project'] \
            .union(r for (r,) in query.all())

    # If there are projects, there will also be project groups.
    if affected['project']:
        # Get all the projects' groups.
        query = api_base.model_query(
            distinct(models.project_group_mapping.c.project_group_id),
            session=session) \
            .filter(models.project_group_mapping.c.project_id
                    .in_(affected['project']))

        affected['project_group'] = affected['project_group'] \
            .union(r for (r,) in query.all())

    # Load all subscribers.
    subscribers = set()
    for affected_type in affected:
        query = api_base.model_query(distinct(
            models.Subscription.user_id), session=session) \
            .filter(models.Subscription.target_type == affected_type) \
            .filter(models.Subscription.target_id
                    .in_(affected[affected_type]))

        if users is not None:
            query = query.filter(models.Subscription.user_id.in_(users))

        results = query.all()
        subscribers = subscribers.union(r for (r, ) in results)

    return subscribers
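Given the cascade described in the docstring, a single call fans out from the resource to everything above it in the hierarchy; for example (the id below is a placeholder):

# Placeholder id: collects the ids of users subscribed to task 17
# itself, to its story, to any project containing that story's tasks,
# or to those projects' project groups.
subscriber_ids = subscription_get_all_subscriber_ids('task', 17)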
def eager_load_documents(query):
    return query.options(subqueryload(models.Annotation.document))
def eager_query(cls):
    from sqlalchemy import orm
    query = super(Riskable, cls).eager_query()
    return cls.eager_inclusions(query, Riskable._include_links).options(
        orm.subqueryload('risk_objects'))
def reindex_pairs(pairs):
    """Reindex selected snapshots.

    Args:
        pairs: A list of parent-child pairs that uniquely represent
            snapshot object whose properties should be reindexed.
    """
    if not pairs:
        return
    snapshots = dict()
    snapshot_query = models.Snapshot.query.filter(
        tuple_(
            models.Snapshot.parent_type,
            models.Snapshot.parent_id,
            models.Snapshot.child_type,
            models.Snapshot.child_id,
        ).in_({pair.to_4tuple() for pair in pairs})
    ).options(
        orm.subqueryload("revision").load_only(
            "id",
            "resource_type",
            "resource_id",
            "_content",
        ),
        orm.load_only(
            "id",
            "context_id",
            "parent_type",
            "parent_id",
            "child_type",
            "child_id",
            "revision_id",
        )
    )
    cad_dict = _get_custom_attribute_dict()
    for snapshot in snapshot_query:
        revision = snapshot.revision
        snapshots[snapshot.id] = {
            "id": snapshot.id,
            "context_id": snapshot.context_id,
            "parent_type": snapshot.parent_type,
            "parent_id": snapshot.parent_id,
            "child_type": snapshot.child_type,
            "child_id": snapshot.child_id,
            "revision": get_searchable_attributes(
                CLASS_PROPERTIES[revision.resource_type],
                cad_dict[revision.resource_type],
                revision.content)
        }
    search_payload = []
    for snapshot in snapshots.values():
        for prop, val in get_properties(snapshot).items():
            search_payload.extend(
                get_record_value(
                    prop, val, {
                        "key": snapshot["id"],
                        "type": "Snapshot",
                        "context_id": snapshot["context_id"],
                        "tags": TAG_TMPL.format(**snapshot),
                        "subproperty": "",
                    }))
    delete_records(snapshots.keys())
    insert_records(search_payload)
def _get_ideas_real(discussion, view_def=None, ids=None, user_id=None):
    user_id = user_id or Everyone
    # optimization: Recursive widget links.
    from assembl.models import (
        Widget, IdeaWidgetLink, IdeaDescendantsShowingWidgetLink)
    universal_widget_links = []
    by_idea_widget_links = defaultdict(list)

    widget_links = discussion.db.query(IdeaWidgetLink).join(Widget).join(
        Discussion).filter(
            Widget.test_active(), Discussion.id == discussion.id,
            IdeaDescendantsShowingWidgetLink.polymorphic_filter()
        ).options(joinedload(IdeaWidgetLink.idea)).all()

    for wlink in widget_links:
        if isinstance(wlink.idea, RootIdea):
            universal_widget_links.append({
                '@type': wlink.external_typename(),
                'widget': Widget.uri_generic(wlink.widget_id)
            })
        else:
            for id in wlink.idea.get_all_descendants(True):
                by_idea_widget_links[Idea.uri_generic(id)].append({
                    '@type': wlink.external_typename(),
                    'widget': Widget.uri_generic(wlink.widget_id)
                })

    next_synthesis_id = discussion.get_next_synthesis_id()
    ideas = discussion.db.query(Idea).filter_by(discussion_id=discussion.id)
    ideas = ideas.outerjoin(
        SubGraphIdeaAssociation,
        and_(SubGraphIdeaAssociation.sub_graph_id == next_synthesis_id,
             SubGraphIdeaAssociation.idea_id == Idea.id))
    ideas = ideas.outerjoin(Idea.source_links)
    ideas = ideas.order_by(IdeaLink.order, Idea.creation_date)
    if ids:
        ids = [get_database_id("Idea", id) for id in ids]
        ideas = ideas.filter(Idea.id.in_(ids))
    # remove tombstones
    ideas = ideas.filter(and_(*Idea.base_conditions()))
    ideas = ideas.options(
        contains_eager(Idea.source_links),
        subqueryload(Idea.attachments).joinedload("document"),
        subqueryload(Idea.widget_links),
        subqueryload(Idea.message_columns),
        joinedload(Idea.title).joinedload("entries"),
        joinedload(Idea.synthesis_title).joinedload("entries"),
        joinedload(Idea.description).joinedload("entries"),
        undefer(Idea.num_children))

    permissions = get_permissions(user_id, discussion.id)
    Idea.prepare_counters(discussion.id, True)

    retval = [idea.generic_json(view_def, user_id, permissions)
              for idea in ideas]
    retval = [x for x in retval if x is not None]
    for r in retval:
        if r.get('widget_links', None) is not None:
            links = r['widget_links'][:]
            links.extend(universal_widget_links)
            links.extend(by_idea_widget_links[r['@id']])
            r['active_widget_links'] = links
    return retval
def story_get_all(title=None, description=None, status=None, assignee_id=None,
                  creator_id=None, project_group_id=None, project_id=None,
                  subscriber_id=None, tags=None, marker=None, offset=None,
                  limit=None, tags_filter_type="all", sort_field='id',
                  sort_dir='asc', current_user=None):
    # Sanity checks, in case someone accidentally explicitly passes in 'None'
    if not sort_field:
        sort_field = 'id'
    if not sort_dir:
        sort_dir = 'asc'

    if not isinstance(status, list) and status is not None:
        status = [status]

    # Build the query.
    subquery = _story_build_query(title=title,
                                  description=description,
                                  assignee_id=assignee_id,
                                  creator_id=creator_id,
                                  project_group_id=project_group_id,
                                  project_id=project_id,
                                  tags=tags,
                                  tags_filter_type=tags_filter_type,
                                  current_user=current_user)

    # Filter by subscriber ID
    if subscriber_id is not None:
        subs = api_base.model_query(models.Subscription)
        subs = api_base.apply_query_filters(query=subs,
                                            model=models.Subscription,
                                            target_type='story',
                                            user_id=subscriber_id)
        subs = subs.subquery()
        subquery = subquery.join(subs, subs.c.target_id == models.Story.id)

    # Turn the whole shebang into a subquery.
    subquery = subquery.subquery('filtered_stories')

    # Return the story summary.
    query = api_base.model_query(models.StorySummary)\
        .options(subqueryload(models.StorySummary.tags))
    query = query.join(subquery,
                       models.StorySummary.id == subquery.c.id)

    if status:
        query = query.filter(models.StorySummary.status.in_(status))

    # paginate the query
    query = api_base.paginate_query(query=query,
                                    model=models.StorySummary,
                                    limit=limit,
                                    sort_key=sort_field,
                                    marker=marker,
                                    offset=offset,
                                    sort_dir=sort_dir)

    raw_stories = query.all()
    return raw_stories
def eager_query(cls):
    from sqlalchemy import orm
    query = super(RiskObject, cls).eager_query()
    return query.options(orm.subqueryload('risk'))
def eager_query(cls):
    query = super(Audit, cls).eager_query()
    return query.options(
        orm.joinedload('program'),
        orm.subqueryload('object_people').joinedload('person'),
    )
def _base_query(self, query):  # pylint: disable=arguments-differ
    return (
        query.outerjoin("metadatas")
        .options(subqueryload("parents_relation").joinedload("treegroup"))
        .options(subqueryload("metadatas"))
    )
def test_subquery_eagerloading(self):
    User = self.classes.User
    Address = self.classes.Address

    base_bq = self.bakery(lambda s: s.query(User))

    base_bq += lambda q: q.options(subqueryload(User.addresses))
    base_bq += lambda q: q.order_by(User.id)

    assert_result = [
        User(id=7,
             addresses=[Address(id=1, email_address='*****@*****.**')]),
        User(id=8, addresses=[
            Address(id=2, email_address='*****@*****.**'),
            Address(id=3, email_address='*****@*****.**'),
            Address(id=4, email_address='*****@*****.**'),
        ]),
        User(id=9, addresses=[Address(id=5)]),
        User(id=10, addresses=[])
    ]

    for i in range(4):
        for cond1, cond2 in itertools.product(
                *[(False, True) for j in range(2)]):
            bq = base_bq._clone()

            sess = Session()

            if cond1:
                bq += lambda q: q.filter(User.name == 'jack')
            else:
                bq += lambda q: q.filter(User.name.like('%ed%'))

            if cond2:
                ct = func.count(Address.id).label('count')
                subq = sess.query(
                    ct,
                    Address.user_id).group_by(Address.user_id).\
                    having(ct > 2).subquery()

                bq += lambda q: q.join(subq)

            if cond2:
                if cond1:
                    def go():
                        result = bq(sess).all()
                        eq_([], result)
                    self.assert_sql_count(testing.db, go, 1)
                else:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[1:2], result)
                    self.assert_sql_count(testing.db, go, 2)
            else:
                if cond1:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[0:1], result)
                    self.assert_sql_count(testing.db, go, 2)
                else:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[1:3], result)
                    self.assert_sql_count(testing.db, go, 2)

            sess.close()
def _Context_eager_query(cls):
    from sqlalchemy import orm
    return _orig_Context_eager_query().options(
        orm.subqueryload('user_roles'))
def load_documents(query):
    return query.options(subqueryload(Annotation.document))
    query.filter(User.addresses.contains(someaddress))

has() (used for scalar references):

    query.filter(Address.user.has(name='ed'))

Query.with_parent() (used for any relationship):

    session.query(Address).with_parent(someuser, 'addresses')

SQLAlchemy provides three kinds of eager loading, applied via the
query.options() function.

1. subquery load:

    >>> from sqlalchemy.orm import subqueryload
    >>> jack = session.query(User).\
    ...     options(subqueryload(User.addresses)).\
    ...     filter_by(name='jack').one()
    >>> jack
    <User(name='jack', fullname='Jack Bean', password='******')>
    >>> jack.addresses
    [<Address(email_address='*****@*****.**')>, <Address(email_address='*****@*****.**')>]

2. joined load:

    >>> from sqlalchemy.orm import joinedload
    >>> jack = session.query(User).\
    ...     options(joinedload(User.addresses)).\
    ...     filter_by(name='jack').one()
    >>> jack
    <User(name='jack', fullname='Jack Bean', password='******')>
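A minimal sketch of the practical difference, assuming the tutorial's User/Address mapping and an open session: subqueryload emits a second SELECT that fetches the addresses of every matched user at once, while joinedload folds the collection into the original statement with a LEFT OUTER JOIN:

from sqlalchemy.orm import joinedload, subqueryload

# Two round trips: one SELECT for users, one for all their addresses.
users = (session.query(User)
         .options(subqueryload(User.addresses))
         .filter(User.name.like('%ed%'))
         .all())

# One round trip: users LEFT OUTER JOIN addresses, deduplicated by the ORM.
users = (session.query(User)
         .options(joinedload(User.addresses))
         .filter(User.name.like('%ed%'))
         .all())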
def get_published(cls, num=None):
    pub_date = Post.pub_date.desc()
    posts = cls.query.options(subqueryload(Post.tags)).filter_by(
        status=PostStatus.published).order_by(pub_date)
    return posts.limit(num) if num else posts
def serialize_categories_ical(category_ids, user, event_filter=True,
                              event_filter_fn=None, update_query=None):
    """Export the events in a category to iCal

    :param category_ids: Category IDs to export
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLalchemy criterion to restrict which events
                         will be returned.  Usually something involving the
                         start/end date of the event.
    :param event_filter_fn: A callable that determines which events to
                            include (after querying)
    :param update_query: A callable that can update the query used to
                         retrieve the events.  Must return the updated
                         query object.
    """
    own_room_strategy = joinedload('own_room')
    own_room_strategy.load_only('building', 'floor', 'number', 'verbose_name')
    own_room_strategy.lazyload('owner')
    own_venue_strategy = joinedload('own_venue').load_only('name')
    query = (Event.query
             .filter(Event.category_chain_overlaps(category_ids),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'category_id', 'start_dt', 'end_dt',
                                'title', 'description', 'own_venue_name',
                                'own_room_name', 'protection_mode',
                                'access_key'),
                      subqueryload('acl_entries'),
                      joinedload('person_links'),
                      own_room_strategy,
                      own_venue_strategy)
             .order_by(Event.start_dt))
    if update_query:
        query = update_query(query)
    it = iter(query)
    if event_filter_fn:
        it = ifilter(event_filter_fn, it)
    events = list(it)
    # make sure the parent categories are in sqlalchemy's identity cache.
    # this avoids query spam from `protection_parent` lookups
    _parent_categs = (Category._get_chain_query(
        Category.id.in_({e.category_id for e in events}))
        .options(load_only('id', 'parent_id', 'protection_mode'),
                 joinedload('acl_entries'))
        .all())
    cal = ical.Calendar()
    cal.add('version', '2.0')
    cal.add('prodid', '-//CERN//INDICO//EN')

    now = now_utc(False)
    for event in events:
        if not event.can_access(user):
            continue
        location = ('{} ({})'.format(event.room_name, event.venue_name)
                    if event.venue_name and event.room_name
                    else (event.venue_name or event.room_name))
        cal_event = ical.Event()
        cal_event.add('uid',
                      u'indico-event-{}@{}'.format(
                          event.id, url_parse(config.BASE_URL).host))
        cal_event.add('dtstamp', now)
        cal_event.add('dtstart', event.start_dt)
        cal_event.add('dtend', event.end_dt)
        cal_event.add('url', event.external_url)
        cal_event.add('summary', event.title)
        cal_event.add('location', location)
        description = []
        if event.person_links:
            speakers = [u'{} ({})'.format(x.full_name, x.affiliation)
                        if x.affiliation else x.full_name
                        for x in event.person_links]
            description.append(u'Speakers: {}'.format(u', '.join(speakers)))
        if event.description:
            desc_text = unicode(event.description) or u'<p/>'  # get rid of RichMarkup
            try:
                description.append(
                    unicode(html.fromstring(desc_text).text_content()))
            except ParserError:
                # this happens e.g. if desc_text contains only a html comment
                pass
        description.append(event.external_url)
        cal_event.add('description', u'\n'.join(description))
        cal.add_component(cal_event)
    return BytesIO(cal.to_ical())
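A sketch of a call to the serializer above, assuming Indico's Event model is importable and `user` is whoever the export runs as; the category id and 30-day window are illustrative:

from datetime import timedelta
from indico.util.date_time import now_utc

# Only export events starting within the next 30 days (illustrative).
criterion = Event.start_dt.between(now_utc(), now_utc() + timedelta(days=30))
ics = serialize_categories_ical([42], user, event_filter=criterion)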
def eager_query(cls):
    query = super(Hierarchical, cls).eager_query()
    return query.options(
        orm.subqueryload('children'),
        # orm.joinedload('parent'),
    )
def eager_query(cls): """Eager Query""" query = super(Roleable, cls).eager_query() return cls.eager_inclusions(query, Roleable._include_links).options( orm.subqueryload('access_control_list'))
def transition_attributes(profile=None, group_size=1000, debug=False,
                          delete_table=False):
    """
    Migrate the DbAttribute table into the attributes column of db_dbnode.
    """
    if not is_dbenv_loaded():
        transition_load_db_env(profile=profile)

    class DbAttribute(Base):
        """
        DbAttribute table, use only for migration purposes.
        """
        __tablename__ = ATTR_TABLE_NAME

        id = Column(Integer, primary_key=True)

        key = Column(String(1024), nullable=False)
        datatype = Column(String(10), nullable=False)

        tval = Column(Text, nullable=False)
        fval = Column(Float)
        ival = Column(Integer)
        bval = Column(Boolean)
        dval = Column(DateTime(timezone=True))

        dbnode_id = Column(Integer, ForeignKey('db_dbnode.id'),
                           nullable=False)
        dbnode = relationship('DbNode', backref='old_attrs')

    print("\nStarting migration of attributes")

    inspector = reflection.Inspector.from_engine(
        sa.get_scoped_session().bind)

    table_names = inspector.get_table_names()
    if NODE_TABLE_NAME not in table_names:
        raise Exception(
            "There is no {} table in the database. Transition "
            "to SQLAlchemy can not be done. Exiting".format(NODE_TABLE_NAME))

    node_table_cols = inspector.get_columns(NODE_TABLE_NAME)
    col_names = [_["name"] for _ in node_table_cols]

    if ATTR_COL_NAME in col_names:
        print(
            "Column named {} found at the {} table of the database. I assume "
            "that the migration of the attributes has already been done and "
            "therefore I proceed with the next migration step.".format(
                ATTR_COL_NAME, NODE_TABLE_NAME))
        return

    session = sa.get_scoped_session()

    with session.begin(subtransactions=True):
        print("Creating columns..")
        session.execute('ALTER TABLE db_dbnode ADD COLUMN attributes '
                        'JSONB DEFAULT \'{}\'')

        from aiida.backends.sqlalchemy.models.node import DbNode
        total_nodes = session.query(func.count(DbNode.id)).scalar()

        total_groups = int(math.ceil(total_nodes / float(group_size)))
        error = False

        for i in xrange(total_groups):
            print("Migrating group {} of {}".format(i, total_groups))

            nodes = DbNode.query.options(
                subqueryload('old_attrs'),
                load_only('id', 'attributes')
            ).order_by(DbNode.id)[i * group_size:(i + 1) * group_size]

            for node in nodes:
                attrs, err_ = attributes_to_dict(
                    sorted(node.old_attrs, key=lambda a: a.key))
                error |= err_

                node.attributes = attrs
                session.add(node)

            # Remove the db_dbnode instances from sqlalchemy, to allow the
            # GC to do its job.
            session.flush()
            session.expunge_all()

            del nodes
            gc.collect()

        if error:
            cont = query_yes_no("There have been some errors during the "
                                "migration. Do you want to continue?", "no")
            if not cont:
                session.rollback()
                sys.exit(-1)

        if delete_table:
            session.execute('DROP TABLE db_dbattribute')
    session.commit()
    print("Migration of attributes finished.")
def get(self):
    redis_stats_key = '{}contest:{}:stats'.format(
        config.redis_prefix, self.contest.id)
    redis_lock_key = '{}contest:{}:stats_lock'.format(
        config.redis_prefix, self.contest.id)
    redis_update_key = '{}contest:{}:stats_update'.format(
        config.redis_prefix, self.contest.id)

    redis_lock = None
    if self.redis_conn:
        redis_lock = self.redis_conn.lock(redis_lock_key, timeout=30)
        while True:
            stats_cache = self.redis_conn.get(redis_stats_key)
            if stats_cache is not None:
                self.set_header(
                    'Cache-Control',
                    'max-age={}'.format(CLIENT_STATS_CACHE_TTL))
                self.write(stats_cache)
                return
            if redis_lock.acquire(blocking=False):
                break
            with closing(
                    self.redis_conn.pubsub(
                        ignore_subscribe_messages=True)) as pubsub:
                pubsub.subscribe(redis_update_key)
                pubsub.get_message(timeout=30)
                pubsub.unsubscribe()

    contest = self.sql_session.query(Contest)\
        .filter(Contest.id == self.contest.id)\
        .options(subqueryload('participations'))\
        .options(subqueryload('participations.submissions'))\
        .options(subqueryload('participations.submissions.token'))\
        .options(subqueryload('participations.submissions.results'))\
        .options(defer('participations.submissions.results.score_details'))\
        .options(defer('participations.submissions.results.public_score_details'))\
        .first()

    score_list = []
    for task in contest.tasks:
        name = task.name
        task_total = 0.0
        for p in contest.participations:
            if p.hidden:
                continue
            t_score, task_partial = task_score(p, task)
            t_score = round(t_score, task.score_precision)
            task_total += t_score
        score_list.append({'name': name, 'total': task_total})

    contest_total = sum(t['total'] for t in score_list)

    def compute_ratio(score_sum):
        if contest_total == 0:
            return 1.0 / len(contest.tasks)
        return score_sum / contest_total

    stats = [{'name': t['name'],
              'ratio': round(compute_ratio(t['total']), 2)}
             for t in score_list]
    stats_text = json.dumps({'tasks_by_score_rel': stats})

    if self.redis_conn:
        self.redis_conn.set(redis_stats_key, stats_text,
                            ex=REDIS_STATS_CACHE_TTL)
        self.redis_conn.publish(redis_update_key, 'updated')
        redis_lock.release()

    self.set_header('Cache-Control',
                    'max-age={}'.format(CLIENT_STATS_CACHE_TTL))
    self.write(stats_text)
def indexed_query(cls):
    return super(Labeled, cls).indexed_query().options(
        orm.subqueryload("_object_labels"))
def eager_query(cls):
    query = super(Directive, cls).eager_query()
    return cls.eager_inclusions(query, Directive._include_links).options(
        orm.joinedload('audit_frequency'),
        orm.joinedload('audit_duration'),
        orm.subqueryload('controls'))
def _process(self):
    rooms = (Room.query
             .filter_by(is_deleted=False)
             .options(subqueryload('available_equipment').load_only('id'))
             .all())
    return jsonify(rooms_schema.dump(rooms))
import time
import gevent
import collections

from datetime import datetime

from sqlalchemy import asc, desc
from sqlalchemy.orm import subqueryload

from inbox.api.kellogs import APIEncoder, encode
from inbox.models import Transaction, Message, Thread
from inbox.models.session import session_scope
from inbox.models.util import transaction_objects

QUERY_OPTIONS = {
    Message: (subqueryload('parts').joinedload('block'),
              subqueryload('thread').load_only('public_id', 'discriminator'),
              subqueryload('events').load_only('public_id', 'discriminator'),
              subqueryload('messagecategories').joinedload('category')),
    Thread: (subqueryload('messages').load_only('public_id', 'is_draft',
                                                'from_addr', 'to_addr',
                                                'cc_addr', 'bcc_addr'), )
}

EVENT_NAME_FOR_COMMAND = {
    'insert': 'create',
    'update': 'modify',
    'delete': 'delete'
}


def get_transaction_cursor_near_timestamp(namespace_id, timestamp, db_session):
    """
def get_population(self, t: int = None):
    """
    Create a pyabc.Population object containing all particles,
    as far as those can be recreated from the database. In particular,
    rejected particles are currently not stored.

    Parameters
    ----------

    t: int, optional (default = self.max_t)
        The population index.
    """
    if t is None:
        t = self.max_t
    else:
        t = int(t)

    models = (
        self._session.query(Model).join(Population).join(ABCSMC).options(
            subqueryload(Model.particles)
            .subqueryload(Particle.samples)
            .subqueryload(Sample.summary_statistics),
            subqueryload(Model.particles)
            .subqueryload(Particle.parameters))
        .filter(ABCSMC.id == self.id)
        .filter(Population.t == t).all())

    py_particles = []

    # iterate over models
    for model in models:
        # model id
        py_m = model.m
        for particle in model.particles:
            # weight
            py_weight = particle.w * model.p_model
            # parameter
            py_parameter = {}
            for parameter in particle.parameters:
                py_parameter[parameter.name] = parameter.value
            py_parameter = PyParameter(**py_parameter)
            # samples
            py_accepted_sum_stats = []
            py_accepted_distances = []
            for sample in particle.samples:
                # summary statistic
                py_sum_stat = {}
                for sum_stat in sample.summary_statistics:
                    py_sum_stat[sum_stat.name] = sum_stat.value
                py_accepted_sum_stats.append(py_sum_stat)
                # distance
                py_distance = sample.distance
                py_accepted_distances.append(py_distance)
            # create particle
            py_particle = PyParticle(
                m=py_m,
                parameter=py_parameter,
                weight=py_weight,
                accepted_sum_stats=py_accepted_sum_stats,
                accepted_distances=py_accepted_distances,
                rejected_sum_stats=[],
                rejected_distances=[],
                accepted=True)
            py_particles.append(py_particle)

    # create population
    py_population = PyPopulation(py_particles)

    return py_population
def eager_query(cls): """Eager query classmethod.""" return super(Labeled, cls).eager_query().options( orm.subqueryload('_object_labels'))