def after_execute(conn, elt, multiparams, params, result):
    """Record timing metrics for a completed SQLAlchemy statement.

    Pops the start timestamp pushed by the matching before-execute hook,
    emits a statsd timer and a debug log line for SELECT/UPDATE/INSERT/DELETE
    statements, and accumulates per-request query counters on ``g`` when a
    Flask request context is active.  Returns ``result`` unchanged.
    """
    started_at = conn.info['query_start_time'].pop(-1)
    duration = 1000 * (time.time() - started_at)

    action = elt.__class__.__name__
    if action == 'Select':
        name = 'unknown'
        try:
            name = _table_name_from_select_element(elt)
        except Exception:
            logging.exception('Failed finding table name.')
    elif action in ('Update', 'Insert', 'Delete'):
        name = elt.table.name
    else:
        # create/drop tables, sqlalchemy internal schema queries, etc
        return

    action = action.lower()
    statsd_client.timing('db.{}.{}'.format(name, action), duration)
    metrics_logger.debug("table=%s query=%s duration=%.2f", name, action, duration)

    if has_request_context():
        g.setdefault('queries_count', 0)
        g.setdefault('queries_duration', 0)
        g.queries_count += 1
        g.queries_duration += duration

    return result
def decorator(*args, **kwargs):
    """Authenticate the request, authorize the user, then call ``fn``.

    Extracts and decodes the auth token, stores the user on ``g``,
    resolves the authorization callback from the decorator's keyword
    arguments, runs the authorization check, and finally invokes the
    wrapped view with the merged keyword arguments.
    """
    token = _token_extractor()
    user = _user_decode(token)
    # NOTE(review): setdefault keeps a pre-existing g.user — confirm that
    # is intended rather than unconditionally overwriting with this token's user.
    g.setdefault('user', user)
    # Bug fix: pop from a *copy* of the closed-over dict.  Popping from
    # ``auth_kwargs`` itself permanently removed 'auth_func'/'auth_cls'
    # after the first invocation, so every later request silently lost
    # its authorization-callback configuration.
    remaining_kwargs = dict(auth_kwargs)
    auth_func = remaining_kwargs.pop('auth_func', None)
    auth_cls = remaining_kwargs.pop('auth_cls', None)
    callback = _get_authorization_callback(auth_func, auth_cls, args)
    # jwt_authorize params take preference to Flask route params in case of name collision.
    kwargs.update(remaining_kwargs)
    _check_authorization(user, callback, **kwargs)
    return fn(*args, **kwargs)
def allocate_friendly_ids(cls, event, n):
    """Allocate n Contribution friendly_ids.

    This is needed so that we can allocate all IDs in one go.  Not doing
    so could result in DB deadlocks.  All operations that create more
    than one contribution should use this method.

    :param event: the :class:`Event` in question
    :param n: the number of ids to pre-allocate
    """
    from indico.modules.events import Event
    # Atomically bump the event's counter by n; ``last_id`` is the highest
    # ID in the freshly reserved range.
    last_id = increment_and_get(Event._last_friendly_contribution_id, Event.id == event.id, n)
    per_class = g.setdefault('friendly_ids', {}).setdefault(cls, {})
    per_class[event.id] = range(last_id - n + 1, last_id + 1)
def preload_events(ids, lightweight=True, persons=False):
    """Preload events so they are in SA's identity cache

    This is useful for legacy pages where we have to show large numbers
    of events without being able to query them from the db cleanly.

    :param ids: An iterable of IDs or Conference objects
    :param lightweight: Only load dates and title
    :param persons: Also load the person links
    """
    cache = g.setdefault('_event_cache', {})
    # Accept both plain IDs and Conference-like objects with an ``id``
    # attribute; only fetch what the cache does not already hold.
    wanted = {int(getattr(id_, 'id', id_)) for id_ in ids}
    missing = wanted - cache.viewkeys()
    query = Event.find(Event.id.in_(missing))
    if lightweight:
        query = query.options(load_only('id', 'title', 'start_dt', 'end_dt', 'timezone'))
    if persons:
        query = query.options(joinedload('person_links'))
    for event in query:
        cache[event.id] = event
def preload_relationships(cls, query, *relationships, **kwargs):
    """Preload relationships for all objects from a query.

    :param query: A SQLAlchemy query object.
    :param relationships: The names of relationships to preload.
    :param strategy: The loading strategy to use for the relationships.
                     Defaults to `joinedload` and can be any callable
                     that takes a relationship name and returns a query
                     option.
    """
    assert cls.allow_relationship_preloading
    strategy = kwargs.pop('strategy', joinedload)
    assert not kwargs  # no other kwargs allowed
    # Per-class cache on g: 'relationships' records which names were
    # already preloaded, 'data' maps each object to its loaded values.
    cache = g.setdefault('relationship_cache', {}).setdefault(cls, {'data': {}, 'relationships': set()})
    missing = set(relationships) - cache['relationships']
    if not missing:
        return
    query = query.options(*map(strategy, missing))
    data_cache = cache['data']
    for obj in query:
        entry = data_cache.setdefault(obj, {})
        # Touch each relationship so it is loaded and remembered.
        for rel_name in missing:
            entry[rel_name] = getattr(obj, rel_name)
    cache['relationships'] |= missing
def _init_livesync_g():
    # Ensure the per-context container for pending livesync changes exists
    # on ``g``.  A defaultdict(set) lets handlers record changes without
    # first checking whether a key is present.  (Idempotent: setdefault
    # keeps an existing mapping, so already-recorded changes survive.)
    g.setdefault('livesync_changes', defaultdict(set))
def _event_moved(conf, old_parent, new_parent, **kwargs):
    """Handle a legacy conference being moved to another category.

    If the target category tree is detached (its root path element is
    not '0'), the event is only remembered on ``g`` for later handling;
    otherwise the event's category id is updated immediately.
    """
    moved_to_detached_tree = new_parent.getCategoryPath()[0] != '0'
    if moved_to_detached_tree:
        g.setdefault('detached_events_moved', set()).add(conf.as_event)
    else:
        event = conf.as_event
        event.category_id = int(new_parent.id)
def init_email_queue():
    """Enable email queueing for the current context."""
    # Create the queue only if it does not exist yet, so calling this more
    # than once within the same context does not discard queued emails.
    g.setdefault('email_queue', [])