def index(self):

    # Releases
    theater = Db.query(MovieETA) \
        .join(MovieETA.Movie) \
        .filter(MovieETA.theater <= time.time() + 1814400) \
        .filter(MovieETA.theater >= time.time()) \
        .filter(or_(Movie.status == 'want', Movie.status == 'waiting')) \
        .order_by(MovieETA.theater) \
        .all()  # theater releases in the next 1814400 s = 3 weeks

    dvd = Db.query(MovieETA) \
        .join(MovieETA.Movie) \
        .filter(MovieETA.dvd <= time.time() + 3628800) \
        .filter(MovieETA.dvd >= time.time()) \
        .filter(or_(Movie.status == 'want', Movie.status == 'waiting')) \
        .order_by(MovieETA.dvd) \
        .all()  # DVD releases in the next 3628800 s = 6 weeks

    dvdNow = Db.query(MovieETA) \
        .join(MovieETA.Movie) \
        .filter(MovieETA.dvd <= time.time()) \
        .filter(MovieETA.dvd > 0) \
        .filter(or_(Movie.status == 'want', Movie.status == 'waiting')) \
        .order_by(MovieETA.dvd) \
        .all()

    return self.render({'dvd': dvd, 'theater': theater, 'dvdNow': dvdNow,
                        'running': self.cron.get('eta').isRunning()})

def user_show_groups_json(user_id, group_filter="all"):
    memberships = Membership.q.filter(Membership.user_id == user_id)
    if group_filter == "active":
        memberships = memberships.filter(
            # it is important to use == here, "is" does NOT work
            or_(Membership.begins_at == None,
                Membership.begins_at <= session.utcnow())
        ).filter(
            # it is important to use == here, "is" does NOT work
            or_(Membership.ends_at == None,
                Membership.ends_at > session.utcnow())
        )

    return jsonify(items=[{
        'group_name': membership.group.name,
        'begins_at': (datetime_filter(membership.begins_at)
                      if membership.begins_at is not None else ''),
        'ends_at': (datetime_filter(membership.ends_at)
                    if membership.ends_at is not None else ''),
        'actions': [{'href': url_for(".edit_membership",
                                     user_id=user_id,
                                     membership_id=membership.id),
                     'title': 'Bearbeiten',
                     'icon': 'glyphicon-edit'},
                    {'href': url_for(".end_membership",
                                     user_id=user_id,
                                     membership_id=membership.id),
                     'title': "Beenden",
                     'icon': 'glyphicon-off'} if membership.active() else {}],
    } for membership in memberships.all()])

def not_covered(cls, count_as_covered=None,
                count_as_not_covered_if_covered_before=None):
    """Filter a query to find only items without coverage records.

    :param count_as_covered: A list of constants that indicate types of
        coverage records that should count as 'coverage' for purposes
        of this query.
    :param count_as_not_covered_if_covered_before: If a coverage record
        exists, but is older than the given date, do not count it as
        covered.
    :return: A clause that can be passed in to Query.filter().
    """
    if not count_as_covered:
        count_as_covered = cls.DEFAULT_COUNT_AS_COVERED
    elif isinstance(count_as_covered, basestring):
        count_as_covered = [count_as_covered]

    # If there is no coverage record, then of course the item is
    # not covered.
    missing = cls.id == None

    # If we're looking for specific coverage statuses, then a
    # record does not count if it has some other status.
    missing = or_(
        missing, ~cls.status.in_(count_as_covered)
    )

    # If the record's timestamp is before the cutoff time, we
    # don't count it as covered, regardless of which status it
    # has.
    if count_as_not_covered_if_covered_before:
        missing = or_(
            missing, cls.timestamp < count_as_not_covered_if_covered_before
        )

    return missing

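# --- Illustrative only: a minimal sketch of how a clause builder like
# not_covered() above is consumed. CoverageRecord, Identifier and session
# are hypothetical stand-ins; the snippet does not show its models.
from datetime import datetime, timedelta

cutoff = datetime.utcnow() - timedelta(days=30)
clause = CoverageRecord.not_covered(
    count_as_covered=['success'],
    count_as_not_covered_if_covered_before=cutoff,
)

# The clause plugs straight into Query.filter(), typically after an outer
# join so that rows with no coverage record at all survive the join.
uncovered = (
    session.query(Identifier)
    .outerjoin(CoverageRecord)
    .filter(clause)
    .all()
)
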
def _wall_events_query(self):
    """WallMixin implementation."""
    from ututi.lib.wall import generic_events_query
    evts_generic = generic_events_query()
    t_evt = meta.metadata.tables['events']
    t_wall_posts = meta.metadata.tables['wall_posts']

    locations = [loc.id for loc in c.location.flatten]
    subjects = meta.Session.query(Subject)\
        .filter(Subject.location_id.in_(locations))\
        .all()

    if self.feed_filter == 'sub_department':
        subject_ids = [subject.id for subject in self.sub_department.subjects
                       if check_crowds(["subject_accessor"], c.user, subject)]
    else:
        subject_ids = [subject.id for subject in subjects
                       if check_crowds(["subject_accessor"], c.user, subject)]

    public_groups = meta.Session.query(Group)\
        .filter(Group.location_id.in_(locations))\
        .filter(Group.forum_is_public == True)\
        .all()

    ids = [obj.id for obj in subjects + public_groups]
    obj_id_in_list = t_evt.c.object_id.in_(ids) if ids else False

    events_query = evts_generic
    if self.feed_filter == 'subjects':
        return events_query.where(
            or_(obj_id_in_list,
                t_wall_posts.c.subject_id.in_(subject_ids)))
    elif self.feed_filter == 'sub_department':
        return events_query.where(
            or_(t_evt.c.object_id.in_(subject_ids) if subject_ids else False,
                t_wall_posts.c.subject_id.in_(subject_ids)))
    elif self.feed_filter == 'discussions':
        return events_query.where(
            or_(t_wall_posts.c.target_location_id.in_(locations),
                t_wall_posts.c.subject_id.in_(subject_ids)))
    else:
        return events_query.where(
            or_(obj_id_in_list,
                t_wall_posts.c.target_location_id.in_(locations),
                t_wall_posts.c.subject_id.in_(subject_ids)))

def authenticate_user(self, name_or_email, password):
    """Authenticate a user by user_name or email plus password.

    If the user passes authentication, return the user_id; otherwise,
    raise an error.
    """
    from sqlalchemy.sql.expression import or_
    User = tables.User

    user = self.session.query(User) \
        .filter(or_(User.user_name == name_or_email,
                    User.email == name_or_email)) \
        .first()
    if user is None:
        # Maybe it's a case problem. Although we enforce lower case for
        # user names and emails now, some older accounts still have ids
        # in mixed case, so we run the query a second time.
        name_or_email = name_or_email.lower()
        user = self.session.query(User) \
            .filter(or_(User.user_name == name_or_email,
                        User.email == name_or_email)) \
            .first()
        if user is None:
            raise UserNotExist('User %s does not exist' % name_or_email)
    if not user.validate_password(password):
        raise BadPassword('Bad password')
    if not user.active:
        raise UserNotActived('User %s is not activated' % user.user_name)
    return user.user_id

def index(self):
    """GET /admin/gists: All items in the collection"""
    # url('gists')
    not_default_user = not c.authuser.is_default_user
    c.show_private = request.GET.get('private') and not_default_user
    c.show_public = request.GET.get('public') and not_default_user

    gists = Gist().query() \
        .filter(or_(Gist.gist_expires == -1, Gist.gist_expires >= time.time())) \
        .order_by(Gist.created_on.desc())

    # MY private
    if c.show_private and not c.show_public:
        gists = gists.filter(Gist.gist_type == Gist.GIST_PRIVATE) \
            .filter(Gist.gist_owner == c.authuser.user_id)
    # MY public
    elif c.show_public and not c.show_private:
        gists = gists.filter(Gist.gist_type == Gist.GIST_PUBLIC) \
            .filter(Gist.gist_owner == c.authuser.user_id)
    # MY public+private
    elif c.show_private and c.show_public:
        gists = gists.filter(or_(Gist.gist_type == Gist.GIST_PUBLIC,
                                 Gist.gist_type == Gist.GIST_PRIVATE)) \
            .filter(Gist.gist_owner == c.authuser.user_id)

    # default show ALL public gists
    if not c.show_public and not c.show_private:
        gists = gists.filter(Gist.gist_type == Gist.GIST_PUBLIC)

    c.gists = gists
    p = safe_int(request.GET.get('page', 1), 1)
    c.gists_pager = Page(c.gists, page=p, items_per_page=10)
    return render('admin/gists/index.html')

def get_wall_events_query(self):
    user_is_admin_of_groups = [membership.group_id
                               for membership in self.memberships
                               if membership.membership_type == 'administrator']

    subjects = self.all_watched_subjects
    if self.is_teacher:
        subjects += self.taught_subjects

    from ututi.lib.wall import generic_events_query
    evts_generic = generic_events_query()

    t_evt = meta.metadata.tables['events']
    t_evt_comments = meta.metadata.tables['event_comments']
    t_wall_posts = meta.metadata.tables['wall_posts']
    t_content_items = meta.metadata.tables['content_items']

    subject_ids = [s.id for s in subjects]
    group_ids = [m.group.id for m in self.memberships]

    user_commented_evts_select = select(
        [t_evt_comments.c.event_id],
        from_obj=[t_evt_comments.join(
            t_content_items,
            t_content_items.c.id == t_evt_comments.c.id)],)\
        .where(t_content_items.c.created_by == self.id)
    user_commented_evts = map(
        lambda r: r[0],
        meta.Session.execute(user_commented_evts_select).fetchall())

    query = evts_generic\
        .where(or_(
            # subject wall posts
            or_(t_evt.c.object_id.in_(subject_ids),
                t_wall_posts.c.subject_id.in_(subject_ids)) if subject_ids else False,
            # location wall posts
            and_(or_(t_evt.c.author_id == self.id,
                     # XXX User comments may grow to 1k-10k scale, consider a different implementation.
                     t_evt.c.id.in_(user_commented_evts) if user_commented_evts else False),
                 t_evt.c.event_type.in_(('subject_wall_post', 'location_wall_post'))),
            or_(t_evt.c.object_id.in_(group_ids),) if group_ids else False))\
        .where(or_(
            t_evt.c.event_type != 'moderated_post_created',
            t_evt.c.object_id.in_(user_is_admin_of_groups) if user_is_admin_of_groups else False))\
        .where(not_(
            t_evt.c.event_type.in_(self.ignored_events_list) if self.ignored_events_list else False))

    return query

def availableChars(self, status = None, release_status = None):

    chars = ''
    db = get_session()

    # Make a list from string
    if not isinstance(status, (list, tuple)):
        status = [status]
    if release_status and not isinstance(release_status, (list, tuple)):
        release_status = [release_status]

    q = db.query(Movie) \
        .outerjoin(Movie.releases, Movie.library, Library.titles, Movie.status) \
        .options(joinedload_all('library.titles'))

    # Filter on movie status
    if status and len(status) > 0:
        q = q.filter(or_(*[Movie.status.has(identifier = s) for s in status]))

    # Filter on release status
    if release_status and len(release_status) > 0:
        q = q.filter(or_(*[Release.status.has(identifier = s) for s in release_status]))

    results = q.all()

    for movie in results:
        char = movie.library.titles[0].simple_title[0]
        char = char if char in ascii_lowercase else '#'
        if char not in chars:
            chars += str(char)

    db.expire_all()
    return ''.join(sorted(chars, key = str.lower))

def read_many_byuser(self, request):
    """ """
    username = request.matchdict['username']

    page = int(request.params.get("page", 1))
    pagesize = int(request.params.get("pagesize", 10))

    # .first() returns a model instance or None, so test identity directly
    if self.Session.query(User).filter(User.username == username).first() is None:
        raise HTTPNotFound("Requested user does not exist.")

    items = []

    activities_sub_query = self.Session.query(
        Activity.activity_identifier.label("identifier"),
        Activity.version,
        Changeset.timestamp,
        Changeset.fk_user).\
        join(Changeset).\
        filter(or_(Activity.fk_status == 2, Activity.fk_status == 3)).\
        subquery(name="sub_act")

    activities_query = self.Session.query(activities_sub_query, User.username).\
        join(User).filter(User.username == username).subquery(name="act")

    # All active and inactive stakeholders
    stakeholder_active = self.Session.query(Stakeholder).\
        filter(or_(Stakeholder.fk_status == 2, Stakeholder.fk_status == 3)).\
        subquery("st_active")

    # Get the five latest stakeholder by changeset
    stakeholder_sub_query = self.Session.query(
        stakeholder_active.c.stakeholder_identifier.label("identifier"),
        stakeholder_active.c.version,
        Changeset.timestamp,
        Changeset.fk_user).\
        join(Changeset, Changeset.id == stakeholder_active.c.fk_changeset).\
        subquery(name="sub_st")

    # Join the resulting set to the user table
    stakeholder_query = self.Session.query(stakeholder_sub_query, User.username).\
        join(User).filter(User.username == username).subquery(name="st")

    query = self.Session.query(activities_query,
                               literal_column("\'activity\'").label("type")).\
        union(self.Session.query(stakeholder_query,
                                 literal_column("\'stakeholder\'").label("type"))).\
        order_by(desc(activities_query.c.timestamp)).\
        order_by(desc(activities_query.c.version))

    for i in query.offset((page - 1) * pagesize).limit(pagesize).all():
        items.append({
            "type": i.type,
            "author": i.username,
            "timestamp": i.timestamp,
            "version": i.version,
            "identifier": str(i.identifier)
        })

    return {
        "items": items,
        "username": username,
        "totalitems": query.count(),
        "pagesize": pagesize,
        "currentpage": page
    }

def list_bangumi(self, page, count, sort_field, sort_order, name, user_id, bangumi_type):
    try:
        session = SessionManager.Session()
        query_object = session.query(Bangumi).\
            options(joinedload(Bangumi.cover_image)).\
            filter(Bangumi.delete_mark == None)

        if bangumi_type != -1:
            query_object = query_object.filter(Bangumi.type == bangumi_type)

        if name is not None:
            name_pattern = '%{0}%'.format(name.encode('utf-8'),)
            logger.debug(name_pattern)
            query_object = query_object.\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern)))
            # count total rows
            total = session.query(func.count(Bangumi.id)).\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern))).\
                scalar()
        else:
            total = session.query(func.count(Bangumi.id)).scalar()

        if sort_order == 'desc':
            query_object = query_object.\
                order_by(desc(getattr(Bangumi, sort_field)))
        else:
            query_object = query_object.\
                order_by(asc(getattr(Bangumi, sort_field)))

        if count == -1:
            bangumi_list = query_object.all()
        else:
            offset = (page - 1) * count
            bangumi_list = query_object.offset(offset).limit(count).all()

        bangumi_id_list = [bgm.id for bgm in bangumi_list]

        favorites = session.query(Favorites).\
            filter(Favorites.bangumi_id.in_(bangumi_id_list)).\
            filter(Favorites.user_id == user_id).\
            all()

        bangumi_dict_list = []
        for bgm in bangumi_list:
            bangumi = row2dict(bgm, Bangumi)
            bangumi['cover'] = utils.generate_cover_link(bgm)
            utils.process_bangumi_dict(bgm, bangumi)
            for fav in favorites:
                if fav.bangumi_id == bgm.id:
                    bangumi['favorite_status'] = fav.status
            bangumi_dict_list.append(bangumi)

        return json_resp({'data': bangumi_dict_list, 'total': total})
    finally:
        SessionManager.Session.remove()

def p_file_term(p):
    '''file_term : OP_FILE string'''
    if p[2].startswith('^'):
        p[0] = and_(or_(func.matches(p[2], gertty.db.file_table.c.path),
                        func.matches(p[2], gertty.db.file_table.c.old_path)),
                    # Must be != None, not "is not None": "is" compares the
                    # Column object itself and always yields a plain Python
                    # True instead of an IS NOT NULL clause.
                    gertty.db.file_table.c.status != None)
    else:
        p[0] = and_(or_(gertty.db.file_table.c.path == p[2],
                        gertty.db.file_table.c.old_path == p[2]),
                    gertty.db.file_table.c.status != None)

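# --- Illustrative only: why "is not None" cannot build SQL. "is" is a
# Python identity test and cannot be overloaded, while ==/!= are overloaded
# by SQLAlchemy columns to emit IS NULL / IS NOT NULL.
from sqlalchemy import Column, Integer, MetaData, String, Table

files = Table('files', MetaData(),
              Column('id', Integer, primary_key=True),
              Column('status', String))

print(files.c.status != None)        # files.status IS NOT NULL
print(files.c.status.isnot(None))    # same clause, lint-friendly spelling
print(files.c.status is not None)    # True -- a plain Python bool, no SQL
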
def get_locations(search_term, geo_level=None, year="2011"):
    if geo_level is not None and geo_level not in geo_levels:
        raise ValueError("Invalid geo_level: %s" % geo_level)

    session = get_session()
    try:
        if geo_level:
            levels = [geo_level]
        else:
            levels = ["country", "province", "municipality", "subplace"]

        objects = set()

        # search at each level
        for level in levels:
            # already checked that geo_level is valid
            model = get_geo_model(level)

            if level == "subplace":
                # check mainplace and subplace names
                objects.update(
                    session.query(Ward)
                    .join(model)
                    .filter(model.year == year)
                    .filter(
                        or_(
                            model.subplace_name.ilike(search_term + "%"),
                            model.subplace_name.ilike("City of %s" % search_term + "%"),
                            model.mainplace_name.ilike(search_term + "%"),
                            model.code == search_term,
                        )
                    )
                    .limit(10)
                )
            else:
                objects.update(
                    session.query(model)
                    .filter(model.year == year)
                    .filter(
                        or_(
                            model.name.ilike(search_term + "%"),
                            model.name.ilike("City of %s" % search_term + "%"),
                            model.code == search_term.upper(),
                        )
                    )
                    .limit(10)
                )

        order_map = {Country: 4, Ward: 3, Municipality: 2, Province: 1}
        objects = sorted(objects,
                         key=lambda o: [order_map[o.__class__],
                                        getattr(o, "name", getattr(o, "code"))])

        return serialize_demarcations(objects[0:10])
    finally:
        session.close()

def add(self, group):

    db = get_session()

    identifier = '%s.%s.%s' % (group['library']['identifier'],
                               group['meta_data'].get('audio', 'unknown'),
                               group['meta_data']['quality']['identifier'])

    done_status, snatched_status = fireEvent('status.get', ['done', 'snatched'], single = True)

    # Add movie
    movie = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
    if not movie:
        movie = Media(
            library_id = group['library'].get('id'),
            profile_id = 0,
            status_id = done_status.get('id')
        )
        db.add(movie)
        db.commit()

    # Add Release
    rel = db.query(Relea).filter(
        or_(
            Relea.identifier == identifier,
            and_(Relea.identifier.startswith(group['library']['identifier']),
                 Relea.status_id == snatched_status.get('id'))
        )
    ).first()
    if not rel:
        rel = Relea(
            identifier = identifier,
            movie = movie,
            quality_id = group['meta_data']['quality'].get('id'),
            status_id = done_status.get('id')
        )
        db.add(rel)
        db.commit()

    # Add each file type
    added_files = []
    for type in group['files']:
        for cur_file in group['files'][type]:
            # compare strings with ==; "is" checks identity and only happens
            # to work for interned strings
            added_file = self.saveFile(cur_file, type = type,
                                       include_media_info = (type == 'movie'))
            added_files.append(added_file.get('id'))

    # Add the release files in batch
    try:
        added_files = db.query(File).filter(or_(*[File.id == x for x in added_files])).all()
        rel.files.extend(added_files)
        db.commit()
    except:
        log.debug('Failed to attach "%s" to release: %s',
                  (added_files, traceback.format_exc()))

    fireEvent('movie.restatus', movie.id)

    return True

def run(self, commit, welt2000_path):
    welt2000 = get_database(path=welt2000_path)

    self.current_date = datetime.utcnow()

    i = 0
    for airport_w2k in welt2000:
        if (airport_w2k.type != 'airport'
                and airport_w2k.type != 'glider_site'
                and airport_w2k.type != 'ulm'):
            continue

        i += 1
        if i % 100 == 0:
            db.session.flush()

        print str(i) + ": " + airport_w2k.country_code + " " + airport_w2k.name

        # try to find this airport in the database
        near_airport = Airport.query() \
            .filter(and_(Airport.short_name == airport_w2k.short_name,
                         Airport.country_code == airport_w2k.country_code)) \
            .filter(or_(Airport.valid_until == None,
                        Airport.valid_until > self.current_date)) \
            .first()

        # fall back to location-search if airport is not found
        # and only reuse this airport if it's within 250 meters of the old one...
        if near_airport is None or near_airport.distance(airport_w2k) > 250:
            near_airport = Airport.by_location(airport_w2k, distance_threshold=0.0025)

        if near_airport is None:
            # this airport is not in our database yet. add it...
            self.add_airport(airport_w2k)
        else:
            # seems to be the same airport. update with current values
            self.show_differences(near_airport, airport_w2k)
            self.update_airport(near_airport, airport_w2k)

    db.session.flush()

    # now invalidate all remaining airports
    invalid_airports = Airport.query() \
        .filter(Airport.time_modified < self.current_date) \
        .filter(or_(Airport.valid_until == None,
                    Airport.valid_until > self.current_date))

    for airport in invalid_airports:
        print "{} {} {}" \
            .format(airport.country_code, airport.name, airport.icao)
        print "  invalidated"
        airport.valid_until = self.current_date

    if commit:
        db.session.commit()

def list(self, status = ['active'], limit_offset = None, starts_with = None, search = None):

    db = get_session()

    # Make a list from string
    if not isinstance(status, (list, tuple)):
        status = [status]

    q = db.query(Movie) \
        .join(Movie.library, Library.titles) \
        .options(joinedload_all('releases.status')) \
        .options(joinedload_all('releases.quality')) \
        .options(joinedload_all('releases.files')) \
        .options(joinedload_all('releases.info')) \
        .options(joinedload_all('library.titles')) \
        .options(joinedload_all('library.files')) \
        .options(joinedload_all('status')) \
        .options(joinedload_all('files')) \
        .filter(LibraryTitle.default == True) \
        .filter(or_(*[Movie.status.has(identifier = s) for s in status]))

    filter_or = []
    if starts_with:
        starts_with = toUnicode(starts_with.lower())
        if starts_with in ascii_lowercase:
            filter_or.append(LibraryTitle.simple_title.startswith(starts_with))
        else:
            ignore = []
            for letter in ascii_lowercase:
                ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter)))
            filter_or.append(not_(or_(*ignore)))

    if search:
        filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))

    if filter_or:
        q = q.filter(or_(*filter_or))

    q = q.order_by(asc(LibraryTitle.simple_title))

    if limit_offset:
        splt = limit_offset.split(',')
        limit = splt[0]
        # use == rather than "is": identity checks on small ints are a
        # CPython implementation detail, not a comparison
        offset = 0 if len(splt) == 1 else splt[1]
        q = q.limit(limit).offset(offset)

    results = q.all()

    movies = []
    for movie in results:
        temp = movie.to_dict(self.default_dict)
        movies.append(temp)

    return movies

def get_user_member(organization_id, state=None):
    """ Helper function to get member states """
    state_query = None
    if not state:
        state_query = or_(model.Member.state == 'active',
                          model.Member.state == 'pending')
    else:
        state_query = or_(model.Member.state == state)

    query = model.Session.query(model.Member).filter(state_query) \
        .filter(model.Member.table_name == 'user') \
        .filter(model.Member.group_id == organization_id) \
        .filter(model.Member.table_id == c.userobj.id)

    return query.first()

def get_locations(search_term, geo_level=None, year='2011'):
    if geo_level is not None and geo_level not in geo_levels:
        raise ValueError('Invalid geo_level: %s' % geo_level)

    session = get_session()
    try:
        if geo_level:
            levels = [geo_level]
        else:
            levels = ['country', 'province', 'municipality', 'subplace']

        objects = set()

        # search at each level
        for level in levels:
            # already checked that geo_level is valid
            model = {
                'municipality': Municipality,
                'province': Province,
                'subplace': Subplace,
                'country': Country,
            }[level]

            if level == 'subplace':
                # check mainplace and subplace names
                objects.update(session
                               .query(Ward)
                               .join(model)
                               .filter(model.year == year)
                               .filter(or_(model.subplace_name.ilike(search_term + '%'),
                                           model.subplace_name.ilike('City of %s' % search_term + '%'),
                                           model.mainplace_name.ilike(search_term + '%'),
                                           model.code == search_term))
                               .limit(10)
                               )
            else:
                objects.update(session
                               .query(model)
                               .filter(model.year == year)
                               .filter(or_(model.name.ilike(search_term + '%'),
                                           model.name.ilike('City of %s' % search_term + '%'),
                                           model.code == search_term.upper()))
                               .limit(10)
                               )

        order_map = {Country: 4, Ward: 3, Municipality: 2, Province: 1}
        objects = sorted(objects,
                         key=lambda o: [order_map[o.__class__],
                                        getattr(o, 'name', getattr(o, 'code'))])

        return serialize_demarcations(objects[0:10])
    finally:
        session.close()

def get_user(session, username=None, email=None):
    if username and email:
        filter_obj = or_(User.username == username, User.email == email)
    elif username:
        filter_obj = or_(User.username == username)
    elif email:
        filter_obj = or_(User.email == email)
    else:
        return None

    user = session.query(User).filter(filter_obj).first()
    if user:
        return user
    return None

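# --- Illustrative only: the same lookup without the one-element or_()
# calls above, which wrap a single clause in a needless OR. Collecting
# clauses in a list and splatting them into or_() scales to any number of
# optional criteria; User and session are assumed from the snippet above.
from sqlalchemy import or_

def get_user_sketch(session, username=None, email=None):
    clauses = []
    if username:
        clauses.append(User.username == username)
    if email:
        clauses.append(User.email == email)
    if not clauses:
        return None
    # match on any of the supplied identifiers
    return session.query(User).filter(or_(*clauses)).first()
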
def list_bangumi(self, page, count, sort_field, sort_order, name, bangumi_type):
    try:
        session = SessionManager.Session()
        query_object = session.query(Bangumi).\
            options(joinedload(Bangumi.cover_image)).\
            options(joinedload(Bangumi.created_by)).\
            options(joinedload(Bangumi.maintained_by)).\
            filter(Bangumi.delete_mark == None)

        if bangumi_type != -1:
            query_object = query_object.filter(Bangumi.type == bangumi_type)

        if name is not None:
            name_pattern = '%{0}%'.format(name.encode('utf-8'),)
            logger.debug(name_pattern)
            query_object = query_object.\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern)))
            # count total rows
            total = session.query(func.count(Bangumi.id)).\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern))).\
                scalar()
        else:
            total = session.query(func.count(Bangumi.id)).scalar()

        if sort_order == 'desc':
            query_object = query_object.\
                order_by(desc(getattr(Bangumi, sort_field)))
        else:
            query_object = query_object.\
                order_by(asc(getattr(Bangumi, sort_field)))

        # querying all rows is supported by passing count = -1
        if count == -1:
            bangumi_list = query_object.all()
        else:
            offset = (page - 1) * count
            bangumi_list = query_object.offset(offset).limit(count).all()

        bangumi_dict_list = []
        for bgm in bangumi_list:
            bangumi = row2dict(bgm, Bangumi)
            bangumi['cover'] = utils.generate_cover_link(bgm)
            utils.process_bangumi_dict(bgm, bangumi)
            self.__process_user_obj_in_bangumi(bgm, bangumi)
            bangumi_dict_list.append(bangumi)

        return json_resp({'data': bangumi_dict_list, 'total': total})
        # raise ClientError('something happened')
    finally:
        SessionManager.Session.remove()

def agregar_usuario_fase(self, id, **named):
    identity = request.environ.get('repoze.who.identity')
    user = identity['user']
    usuarios = DBSession.query(User).join((Rol, User.groups)).filter(
        or_(Rol.group_name == 'Aprobador',
            Rol.group_name == 'Desarrollador')).all()
    roles = DBSession.query(Rol).filter(
        or_(Rol.group_name == 'Aprobador',
            Rol.group_name == 'Desarrollador')).all()
    fase = DBSession.query(Fase).filter(Fase.id == id).one()
    # usuarioFaseRol = DBSession.query(UsuarioFaseRol).filter(UsuarioFaseRol.fase_id == id).all()
    return dict(page='Asignar Usuario a fase ' + fase.descripcion,
                usuarios=usuarios, roles=roles, proyecto_id=id,
                user=user, fase=fase)

def tag_search(text, count=5):
    """Search in the tag_search_items table (for location tags)."""
    QTag = aliased(LocationTag)
    QParent = aliased(LocationTag)
    text = text.lower().strip()
    query = meta.Session.query(TagSearchItem)\
        .join(QTag)\
        .outerjoin((QParent, QParent.id == QTag.parent_id))\
        .filter(TagSearchItem.terms.op('@@')(func.plainto_tsquery(text)))\
        .order_by(or_(func.lower(func.btrim(QParent.title)) == text,
                      func.lower(func.btrim(QParent.title_short)) == text).desc())\
        .order_by(or_(func.lower(func.btrim(QTag.title)) == text,
                      func.lower(func.btrim(QTag.title_short)) == text).desc())\
        .order_by(func.ts_rank_cd(TagSearchItem.terms, func.plainto_tsquery(text)))
    if count is not None:
        query = query.limit(count)
    return query.all()

def _find_all_by_search(session, search, order):
    line_rows = (_new_query(session, order)
                 .filter(or_(ExtensionSchema.exten.ilike(search),
                             ExtensionSchema.context.ilike(search)))
                 .all())

    return _rows_to_extension_model(line_rows)

def club_change_post(form):
    old_club_id = g.user.club_id
    new_club_id = form.club.data if form.club.data != 0 else None

    if old_club_id == new_club_id:
        return redirect(url_for('.club', user=g.user_id))

    g.user.club_id = new_club_id

    create_club_join_event(new_club_id, g.user)

    # assign the user's new club to all of his flights that have
    # no club yet
    flights = Flight.query().join(IGCFile)
    flights = flights.filter(and_(Flight.club_id == None,
                                  or_(Flight.pilot_id == g.user.id,
                                      IGCFile.owner_id == g.user.id)))

    for flight in flights:
        flight.club_id = g.user.club_id

    db.session.commit()

    flash(_('New club was saved.'), 'success')

    return redirect(url_for('.club', user=g.user_id))

def suggestView(self, limit = 6, **kwargs):
    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:
        if not movies or len(movies) == 0:
            db = get_session()
            active_movies = db.query(Movie) \
                .options(joinedload_all('library')) \
                .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
            movies = [x.library.identifier for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))
        if not seen or len(seen) == 0:
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    return {
        'success': True,
        'count': len(suggestions),
        'suggestions': suggestions[:int(limit)]
    }

def getFilter(date):
    return sql.or_(
        sql.between(date, schema.group.c.start_date, schema.group.c.end_date),
        sql.and_(schema.group.c.start_date <= date,
                 schema.group.c.end_date == None))

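# --- Illustrative only: the clause above matches groups whose date range
# contains the given date, treating a NULL end_date as open-ended. A
# hypothetical caller (session and the schema.group table are assumed from
# the surrounding codebase, which uses the legacy select([...]) style):
import datetime

today = datetime.date.today()
stmt = sql.select([schema.group]).where(getFilter(today))
active_groups = session.execute(stmt).fetchall()
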
def find_meetings(flight_id):
    logger.info("Searching for near flights of flight %d" % flight_id)

    flight = Flight.get(flight_id)

    # Update FlightPathChunks of current flight
    FlightPathChunks.update_flight_path(flight)

    other_flights = FlightPathChunks.get_near_flights(flight)

    # delete all previous detected points between src and dst
    for key in other_flights:
        FlightMeetings.query() \
            .filter(or_(and_(FlightMeetings.source == flight,
                             FlightMeetings.destination_id == key),
                        and_(FlightMeetings.destination == flight,
                             FlightMeetings.source_id == key))) \
            .delete()

    # Insert new meetings into table
    for flight_id, meetings in other_flights.iteritems():
        other_flight = Flight.get(flight_id)

        for meeting in meetings:
            FlightMeetings.add_meeting(flight, other_flight,
                                       meeting['times'][0], meeting['times'][-1])

    db.session.commit()

def _get_query_for_editors():
    """
    Returns a query that selects versions available to editors.
    """
    active_versions = Session.query(
        mappedClass.version,
        mappedClass.fk_status
    ).\
        filter(mappedClass.identifier == uid).\
        filter(or_(
            mappedClass.fk_status == 2,
            mappedClass.fk_status == 3))

    own_filters = and_(
        mappedClass.identifier == uid,
        not_(mappedClass.fk_status == 2),
        not_(mappedClass.fk_status == 3),
        User.username == self.request.user.username)

    own_versions = Session.query(
        mappedClass.version,
        mappedClass.fk_status
    ).\
        join(Changeset).\
        join(User).\
        filter(*own_filters)

    return active_versions.union(own_versions)

def _find_zone_transfer_requests(self, context, criterion, one=False,
                                 marker=None, limit=None, sort_key=None,
                                 sort_dir=None):
    table = tables.zone_transfer_requests

    ljoin = tables.zone_transfer_requests.join(
        tables.domains,
        tables.zone_transfer_requests.c.domain_id == tables.domains.c.id)

    query = select(
        [table, tables.domains.c.name.label("domain_name")]
    ).select_from(ljoin)

    if not context.all_tenants:
        query = query.where(or_(
            table.c.tenant_id == context.tenant,
            table.c.target_tenant_id == context.tenant))

    return self._find(
        context, table, objects.ZoneTransferRequest,
        objects.ZoneTransferRequestList,
        exceptions.ZoneTransferRequestNotFound,
        criterion,
        one=one, marker=marker, limit=limit, sort_dir=sort_dir,
        sort_key=sort_key, query=query, apply_tenant_criteria=False
    )

def populate_files(self):
    with self._session() as session:
        datetime_ago = datetime.datetime.utcnow() - datetime.timedelta(days=3)
        query = session.query(IAItem.id).filter(
            or_(
                IAItem.refresh_date.is_(None),
                IAItem.public_date > datetime_ago
            ))

        for row in query:
            identifier = row[0]

            _logger.info('Populating item %s.', identifier)

            files = yield self._api.get_item_files(identifier)

            query = insert(File).prefix_with('OR IGNORE')
            values = []

            for filename, size in files:
                values.append({
                    'ia_item_id': identifier,
                    'filename': filename,
                    'size': size,
                })

            session.execute(query, values)

            query = update(IAItem).where(IAItem.id == identifier)
            session.execute(
                query,
                {'refresh_date': datetime.datetime.utcnow()}
            )

            session.commit()

def query(self, req, coords, types):
    ## FIXME: This seems crude; it feels like it should also be
    ## quoted, but is at the moment safe because the coordinates
    ## are coerced into decimal:
    point = "POINT(%s %s)" % (coords[1], coords[0])
    ## This is a failed attempt at the query (more GeoAlchemy
    ## based): (I think the problem is a bug in geoalchemy, with
    ## points that are constructed outside of the database/engine)
    #point = WKTSpatialElement(point)
    #s = select([Jurisdiction.__table__], expression.func.ST_Intersects(
    #    Jurisdiction.geom, point))
    #conn = engine.connect()
    #s = conn.execute(s, point=point, srid=4326)
    type_comparisons = []
    for type in types:
        type_comparisons.append(
            Jurisdiction.type_uri == unicode(type))
    s = session.query(Jurisdiction).filter(
        expression.and_(
            expression.func.ST_Intersects(
                Jurisdiction.geom,
                expression.func.GeomFromText(point, 4326)),
            expression.or_(*type_comparisons)))
    results = []
    for row in s:
        results.append(dict(
            type=row.type_uri,
            name=row.name,
            uri=row.uri,
            properties=row.properties,
            kml_uri="%s/api1/kml/%s" % (req.application_url, row.id)))
    return {'results': results}

def get_user_messages(request: Request) -> dict:
    """Show the logged-in user's message conversations."""
    # select conversations where either the user is the recipient, or they
    # were the sender and there is at least one reply (don't need to show
    # conversations the user started but haven't been replied to)
    conversations = (
        request.query(MessageConversation)
        .filter(or_(
            MessageConversation.recipient == request.user,
            and_(
                MessageConversation.sender == request.user,
                MessageConversation.num_replies > 0,
            ),
        ))
        .order_by(desc(MessageConversation.last_reply_time))
        .all()
    )

    return {'conversations': conversations}

def on_account_created_history_entry(_, account_id: int, created_by_id: int,
                                     roles: Sequence[str], note: str):
    if note == '':
        note = None
    history_entry = AccountNote(accountID=account_id, byAccountID=created_by_id,
                                note=note, type=account_notes.TYPE_ACCOUNT_CREATED)

    if len(roles) > 0:
        # unpack the comparisons with *: passing a bare generator to or_()
        # does not build the intended OR of clauses
        db_roles = db.session.query(Role).filter(
            or_(*(Role.name == name for name in roles))).all()
        for role in db_roles:
            # get role from db
            role_change = RoleChangeEntry(added=True, role=role)
            history_entry.role_changes.append(role_change)

    db.session.add(history_entry)
    db.session.commit()

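# --- Illustrative only: for a membership test against plain string values,
# Column.in_() renders the same OR as a single "role.name IN (...)" clause
# and avoids building one equality expression per role name.
db_roles = db.session.query(Role).filter(Role.name.in_(roles)).all()
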
def show_shelf(shelf_type, shelf_id):
    if current_user.is_anonymous:
        shelf = ub.session.query(ub.Shelf).filter(
            ub.Shelf.is_public == 1, ub.Shelf.id == shelf_id).first()
    else:
        shelf = ub.session.query(ub.Shelf).filter(
            or_(
                and_(ub.Shelf.user_id == int(current_user.id),
                     ub.Shelf.id == shelf_id),
                and_(ub.Shelf.is_public == 1,
                     ub.Shelf.id == shelf_id))).first()

    result = list()
    # user is allowed to access shelf
    if shelf:
        page = "shelf.html" if shelf_type == 1 else 'shelfdown.html'

        books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id)\
            .order_by(ub.BookShelf.order.asc()).all()
        for book in books_in_shelf:
            cur_book = db.session.query(db.Books).filter(
                db.Books.id == book.book_id).filter(common_filters()).first()
            if cur_book:
                result.append(cur_book)
            else:
                cur_book = db.session.query(
                    db.Books).filter(db.Books.id == book.book_id).first()
                if not cur_book:
                    log.info('Not existing book %s in %s deleted', book.book_id, shelf)
                    ub.session.query(ub.BookShelf).filter(
                        ub.BookShelf.book_id == book.book_id).delete()
                    ub.session.commit()
        return render_title_template(page, entries=result,
                                     title=_(u"Shelf: '%(name)s'", name=shelf.name),
                                     shelf=shelf, page="shelf")
    else:
        flash(_(u"Error opening shelf. Shelf does not exist or is not accessible"),
              category="error")
        return redirect(url_for("web.index"))

def get_total_events_for_user(self, user, parameters=None):
    try:
        group_ids = self.__get_all_group_ids_of_user(user)
        tlp = get_max_tlp(user.group)
        # TODO add validation and published checks
        # TODO: total events for user
        result = self.session.query(Event).distinct().join(
            EventGroupPermission).filter(
                and_(
                    Event.dbcode.op('&')(4) == 4,
                    or_(Event.tlp_level_id >= tlp,
                        EventGroupPermission.group_id.in_(group_ids))))
        result = self.__set_parameters(result, parameters)
        result = result.count()
        return result
    except sqlalchemy.exc.SQLAlchemyError as error:
        self.session.rollback()
        raise BrokerException(error)

def sanitize_build_year(cls) -> sase.Update:
    """
    Query to update build_year with the year from firstuse when build_year
    is lower than 1940 or higher than 2020.

    A quick scan of the data showed that 1940 is approximately the lowest
    build_year that looks reasonable compared with firstuse. The data files
    appear to have been created in 2018, so 2020 is already a bit
    optimistic. All "years" that fall outside this range are overwritten
    with the year of firstuse. If firstuse itself has a diverging year,
    that is not remedied further because there is nothing to quickly test
    or check against.

    :return: A sql statement to update the build_year column with the
        firstuse year
    """
    stmt = sase.update(cls).prefix_with("IGNORE").where(sase.or_(
        sase.cast(cls.build_year, Integer) < constants.MIN_YEAR,
        sase.cast(cls.build_year, Integer) > constants.MAX_YEAR)
    ).values(
        build_year=sase.cast(
            sase.extract('year', sase.cast(cls.firstuse, Date)), String)
    )
    return stmt

def build_predicate_clause(self, predicate, table):
    """
    Build Predicate clause.

    Capable of taking a list of predicates as well, in which case
    subclauses are joined with 'OR'.
    """
    if isinstance(predicate, REGEXTerm):
        # TODO: this works only in MySQL. Must adapt for Postgres and SQLite
        return table.c.predicate.op("REGEXP")(predicate)
    elif isinstance(predicate, list):
        return expression.or_(*[
            self.build_predicate_clause(p, table) for p in predicate if p
        ])
    elif predicate is not None:
        return table.c.predicate == predicate
    else:
        return None

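# --- Illustrative only: one way to make the regex branch above
# dialect-aware, assuming the caller can pass the engine's dialect name.
# MySQL spells the operator REGEXP, SQLite accepts REGEXP only once a
# regexp() function has been registered on the connection, and PostgreSQL
# uses ~ for POSIX regular expression matching.
def regex_clause(column, pattern, dialect_name):
    if dialect_name == 'postgresql':
        return column.op('~')(pattern)
    # MySQL natively; SQLite via a user-registered regexp() function
    return column.op('REGEXP')(pattern)
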
def markAsRead(self):
    ids = None
    if getParam('ids'):
        ids = splitString(getParam('ids'))

    db = get_session()

    if ids:
        q = db.query(Notif).filter(or_(*[Notif.id == tryInt(s) for s in ids]))
    else:
        q = db.query(Notif).filter_by(read = False)

    q.update({Notif.read: True})
    db.commit()

    return jsonified({'success': True})

def get(cls: T, id: int = None, **kwargs) -> Union[T, None]:
    """Gets class instance using id or named attributes

    Args:
        id (int, optional): User id.
        kwargs: named arguments must be an attribute of the class

    Returns:
        An instance of the class
    """
    for arg in kwargs.keys():
        assert hasattr(cls, arg)

    result: T = (db.session.query(cls).filter(
        and_(
            # "id == None" is evaluated in Python: when no id is given the
            # OR collapses to TRUE and only the keyword filters apply
            or_(getattr(cls, "id") == id, id == None),
            *[
                getattr(cls, arg) == val
                for arg, val in kwargs.items()
                if hasattr(cls, arg)
            ],
        )).one_or_none())
    return result

def add(self, path = '', part = 1, type = (), available = 1, properties = {}):

    db = get_session()
    f = db.query(File).filter(
        or_(File.path == toUnicode(path), File.path == path)).first()
    if not f:
        f = File()
        db.add(f)

    f.path = path
    f.part = part
    f.available = available
    f.type_id = self.getType(type).id
    db.commit()

    file_dict = f.to_dict()

    return file_dict

def provision_virt_recipes(*args):
    work_done = False
    recipes = MachineRecipe.query\
        .join(Recipe.recipeset).join(RecipeSet.job)\
        .join(Recipe.distro_tree, DistroTree.lab_controller_assocs, LabController)\
        .filter(Recipe.status == TaskStatus.queued)\
        .filter(Recipe.virt_status == RecipeVirtStatus.possible)\
        .filter(LabController.disabled == False)\
        .filter(or_(RecipeSet.lab_controller == None,
                    RecipeSet.lab_controller_id == LabController.id))\
        .order_by(RecipeSet.priority.desc(), Recipe.id.asc())
    futures = [get_virt_executor().submit(provision_virt_recipe, recipe_id)
               for recipe_id, in recipes.values(Recipe.id.distinct())]
    if futures:
        concurrent.futures.wait(futures)
        work_done = True
    return work_done

def search(cls, querystr, sqlalchemy_query=None, user_name=None):
    '''Search name, fullname, email and openid.'''
    if sqlalchemy_query is None:
        query = meta.Session.query(cls)
    else:
        query = sqlalchemy_query
    qstr = '%' + querystr + '%'
    filters = [
        cls.name.ilike(qstr),
        cls.fullname.ilike(qstr),
        cls.openid.ilike(qstr),
    ]
    # sysadmins can search on user emails
    import ckan.new_authz as new_authz
    if user_name and new_authz.is_sysadmin(user_name):
        filters.append(cls.email.ilike(qstr))

    query = query.filter(or_(*filters))
    return query

def _get_filter(filter_name, column, filters):
    '''
    Apply filters for Disability

    :rtype: sqlalchemy.sql.select
    :returns: list of filters to be applied to query
    '''
    where_clause = None
    expr = []
    target_filter = filters.get(filter_name, None)
    if target_filter:
        for f in target_filter:
            try:
                expr.append(column == filter_map[f])
            except KeyError:
                # silently skip filter values that have no mapping
                pass

    if expr:
        where_clause = or_(*expr)

    return where_clause

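# --- Illustrative only: hypothetical usage of _get_filter(). filter_map
# and the disability table come from the surrounding module, which this
# snippet does not show; unknown filter values are silently skipped.
from sqlalchemy import select

filters = {'disability': ['yes', 'no', 'bogus']}  # 'bogus' has no mapping
clause = _get_filter('disability', disability_table.c.disability, filters)

query = select([disability_table])
if clause is not None:
    query = query.where(clause)
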
def _prepare_condition(self):
    self._conditions = []
    self._group_by = []
    for cut in self.cell.cuts:
        dim = self.cube.dimension(cut.dimension)

        if isinstance(cut, cubes.browser.PointCut):
            path = cut.path
            condition = self._point_condition(dim, path)
        elif isinstance(cut, cubes.browser.SetCut):
            conditions = []
            for path in cut.paths:
                conditions.append(self._point_condition(dim, path))
            condition = expression.or_(*conditions)
        else:
            raise Exception("Only point and set cuts are supported "
                            "in SQL browser at the moment")

        self._conditions.append(condition)

    self._condition = expression.and_(*self._conditions)

def __check_user_jobs( self, job, job_wrapper ):
    # TODO: Update output datasets' _state = LIMITED or some such new
    # state, so the UI can reflect what jobs are waiting due to concurrency
    # limits
    if job.user:
        # Check the hard limit first
        if self.app.job_config.limits.registered_user_concurrent_jobs:
            count = self.get_user_job_count(job.user_id)
            # Check the user's number of dispatched jobs against the overall limit
            if count >= self.app.job_config.limits.registered_user_concurrent_jobs:
                return JOB_WAIT
        # If we pass the hard limit, also check the per-destination count
        id = job_wrapper.job_destination.id
        count_per_id = self.get_user_job_count_per_destination(job.user_id)
        if id in self.app.job_config.limits.destination_user_concurrent_jobs:
            count = count_per_id.get(id, 0)
            # Check the user's number of dispatched jobs in the assigned destination id against the limit for that id
            if count >= self.app.job_config.limits.destination_user_concurrent_jobs[id]:
                return JOB_WAIT
        # If we pass the destination limit (if there is one), also check limits on any tags (if any)
        if job_wrapper.job_destination.tags:
            for tag in job_wrapper.job_destination.tags:
                # Check each tag for this job's destination
                if tag in self.app.job_config.limits.destination_user_concurrent_jobs:
                    # Only if there's a limit defined for this tag
                    count = 0
                    for id in [ d.id for d in self.app.job_config.get_destinations(tag) ]:
                        # Add up the aggregate job total for this tag
                        count += count_per_id.get(id, 0)
                    if count >= self.app.job_config.limits.destination_user_concurrent_jobs[tag]:
                        return JOB_WAIT
    elif job.galaxy_session:
        # Anonymous users only get the hard limit
        if self.app.job_config.limits.anonymous_user_concurrent_jobs:
            count = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                .filter( and_( model.Job.session_id == job.galaxy_session.id,
                               or_( model.Job.state == model.Job.states.RUNNING,
                                    model.Job.state == model.Job.states.QUEUED ) ) ).count()
            if count >= self.app.job_config.limits.anonymous_user_concurrent_jobs:
                return JOB_WAIT
    else:
        log.warning( 'Job %s is not associated with a user or session so job concurrency limit cannot be checked.' % job.id )

    return JOB_READY

def find_candidates_statement(self, query: str) -> Select:
    from joj.horse import models

    statement = select(models.User, models.DomainUser)
    statement = (
        models.User.apply_search(statement, query)
        .outerjoin_from(
            models.User,
            models.DomainUser,
            models.User.id == models.DomainUser.user_id,
        )
        .where(
            or_(
                models.DomainUser.domain_id == self.id,
                models.DomainUser.domain_id.is_(None),  # type: ignore[attr-defined]
            )
        )
    )
    return statement

def get_project_user_stats(self, user_id: int) -> ProjectUserStatsDTO:
    """Compute project specific stats for a given user"""
    stats_dto = ProjectUserStatsDTO()
    stats_dto.time_spent_mapping = 0
    stats_dto.time_spent_validating = 0
    stats_dto.total_time_spent = 0

    total_mapping_time = (
        db.session.query(
            func.sum(cast(func.to_timestamp(TaskHistory.action_text, "HH24:MI:SS"), Time)))
        .filter(
            or_(
                TaskHistory.action == "LOCKED_FOR_MAPPING",
                TaskHistory.action == "AUTO_UNLOCKED_FOR_MAPPING",
            ))
        .filter(TaskHistory.user_id == user_id)
        .filter(TaskHistory.project_id == self.id))

    for time in total_mapping_time:
        total_mapping_time = time[0]
        if total_mapping_time:
            stats_dto.time_spent_mapping = total_mapping_time.total_seconds()
            stats_dto.total_time_spent += stats_dto.time_spent_mapping

    query = (
        TaskHistory.query.with_entities(
            func.date_trunc("minute", TaskHistory.action_date).label("trn"),
            func.max(TaskHistory.action_text).label("tm"),
        )
        .filter(TaskHistory.user_id == user_id)
        .filter(TaskHistory.project_id == self.id)
        .filter(TaskHistory.action == "LOCKED_FOR_VALIDATION")
        .group_by("trn")
        .subquery())

    total_validation_time = db.session.query(
        func.sum(cast(func.to_timestamp(query.c.tm, "HH24:MI:SS"), Time))).all()

    for time in total_validation_time:
        total_validation_time = time[0]
        if total_validation_time:
            stats_dto.time_spent_validating = total_validation_time.total_seconds()
            stats_dto.total_time_spent += stats_dto.time_spent_validating

    return stats_dto

def get_available_venues(start, end, sitting=None):
    """get all venues that are not booked for a sitting
    (but sitting if given) in the given time period

    SQL:
    SELECT * FROM venues WHERE venues.venue_id NOT IN
        (SELECT group_sittings.venue_id FROM group_sittings
         WHERE (group_sittings.start_date BETWEEN '2000-01-01' AND '2000-01-02'
            OR group_sittings.end_date BETWEEN '2000-01-01' AND '2000-01-02'
            OR '2000-01-01' BETWEEN group_sittings.start_date AND group_sittings.end_date
            OR '2000-01-02' BETWEEN group_sittings.start_date AND group_sittings.end_date)
           AND group_sittings.venue_id IS NOT NULL)
    """
    session = Session()
    query = session.query(domain.Venue)
    b_filter = sql.and_(
        sql.or_(
            sql.between(schema.sittings.c.start_date, start, end),
            sql.between(schema.sittings.c.end_date, start, end),
            sql.between(start, schema.sittings.c.start_date, schema.sittings.c.end_date),
            sql.between(end, schema.sittings.c.start_date, schema.sittings.c.end_date)
        ),
        schema.sittings.c.venue_id != None)
    if sitting:
        if sitting.sitting_id:
            b_filter = sql.and_(b_filter,
                                schema.sittings.c.sitting_id != sitting.sitting_id)
    query = query.filter(
        sql.not_(
            schema.venues.c.venue_id.in_(
                sql.select([schema.sittings.c.venue_id]).where(b_filter)
            )))
    venues = query.all()
    #session.close()
    return venues

def build_feature_filter(filter_args):
    feature_filter_args = []
    positive_include = [(k, [v for v in vs if not v.startswith('-')])
                        for k, vs in filter_args]
    positive_include = [(k, v) for k, vs in positive_include if vs for v in vs]
    if positive_include:
        feature_filter_args.append(
            or_(*(model.Tweet.features.contains({k: [v]})
                  for k, v in positive_include)))
    negative_include = [(k, [v[1:] for v in vs if v.startswith('-')])
                        for k, vs in filter_args]
    negative_include = [(k, v) for k, vs in negative_include if vs for v in vs]
    if negative_include:
        feature_filter_args.append(
            and_(*(not_(model.Tweet.features.contains({k: [v]}))
                   for k, v in negative_include)))
    return feature_filter_args

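# --- Illustrative only: hypothetical usage of build_feature_filter().
# filter_args pairs each feature key with requested values, a "-" prefix
# meaning exclusion. The returned list of clauses splats into
# Query.filter(), whose arguments are ANDed together; session is assumed.
filter_args = [('hashtags', ['python', '-spam'])]
tweets = (session.query(model.Tweet)
          .filter(*build_feature_filter(filter_args))
          .all())
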
def feed_shelf(book_id):
    off = request.args.get("offset") or 0
    if current_user.is_anonymous:
        shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1,
                                                  ub.Shelf.id == book_id).first()
    else:
        shelf = ub.session.query(ub.Shelf).filter(
            or_(and_(ub.Shelf.user_id == int(current_user.id),
                     ub.Shelf.id == book_id),
                and_(ub.Shelf.is_public == 1,
                     ub.Shelf.id == book_id))).first()

    result = list()
    # user is allowed to access shelf
    if shelf:
        books_in_shelf = ub.session.query(ub.BookShelf)\
            .filter(ub.BookShelf.shelf == book_id)\
            .order_by(ub.BookShelf.order.asc()).all()
        for book in books_in_shelf:
            cur_book = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
            result.append(cur_book)
    pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1),
                            config.config_books_per_page,
                            len(result))
    return render_xml_template('feed.xml', entries=result, pagination=pagination)

def _init_urls(self):
    self.session = scoped_session(MygiftSession)
    self.query_product = self.session.query(ProductObj)
    self.query_price = self.session.query(ProductPriceObj)
    self.redis_cli = CyeRedis.getInstance()
    self.update_urls_key = settings.get('REDIS_UPDATE_URLS_KEY', '%s:update') % self.namespace
    results = self.redis_cli.zrange(self.update_urls_key, 0, lite_max_num, withscores=True)
    if results:
        for one in results:
            pkey = hashlib.md5(one[0]).hexdigest()
            # Raw SQL strings inside or_() only work on old SQLAlchemy
            # versions; modern releases require wrapping them in text().
            product = self.query_product.filter(ProductObj.pkey == pkey).filter(
                or_("last_crawl_time is null",
                    "last_crawl_time<DATE_SUB(NOW(), INTERVAL :time_interval HOUR)")).\
                params(time_interval=crawl_time_interval).first()
            if product:
                self.start_urls.append(one[0])
        #self.start_urls.extend(results)
        self.log("The number of links : %d" % len(results), log.INFO)
    else:
        self.log("Not found link to update.", log.INFO)

def getSources(self, blogId):
    '''
    @see: IBlogSourceService.getSources
    '''
    sql = self.session().query(SourceMapped)
    sql = sql.join(BlogSourceDB, SourceMapped.Id == BlogSourceDB.source)
    sql = sql.join(BlogMapped, BlogMapped.Id == BlogSourceDB.blog)\
        .filter(BlogMapped.Id == blogId)

    sql_prov = self.session().query(SourceMapped.URI)
    sql_prov = sql_prov.join(SourceTypeMapped,
                             SourceTypeMapped.id == SourceMapped.typeId)
    sql_prov = sql_prov.filter(SourceTypeMapped.Key == self.blog_provider_type)

    sql = sql.filter(or_(SourceMapped.OriginURI == None,
                         SourceMapped.OriginURI.in_(sql_prov)))
    return sql.all()

def get_unavailable_resources(start, end):
    """ get all resources that are booked in the given time period """
    assert type(start) == datetime.datetime
    assert type(end) == datetime.datetime
    session = Session()
    b_filter = sql.or_(
        sql.between(schema.sittings.c.start_date, start, end),
        sql.between(schema.sittings.c.end_date, start, end),
        sql.between(start, schema.sittings.c.start_date, schema.sittings.c.end_date),
        sql.between(end, schema.sittings.c.start_date, schema.sittings.c.end_date))
    query = session.query(BookedResources).filter(b_filter)
    resources = query.all()
    #session.close()
    return resources

def fellow_list():
    search_form = SearchAlumniForm()
    if search_form.validate_on_submit():
        keywords = search_form.keywords.data
        activities = User.query.filter(
            or_(User.name.like('%' + keywords + '%'))).all()
        return render_template('admin/verify/schoolfellow_index.html',
                               search_form=search_form,
                               activities=activities)
    page = request.args.get('page', 1, type=int)
    user = User.query.filter(not_(User.username == 'admin'))
    pagination = user.filter(not_(User.name == 'None')).paginate(
        page,
        per_page=current_app.config['FLASY_NEWS_PER_PAGE'],
        error_out=False)
    users = pagination.items
    return render_template("admin/verify/schoolfellow_index.html",
                           activities=users,
                           search_form=search_form,
                           pagination=pagination)

def company_list():
    search_form = SearchAlumniForm()
    if search_form.validate_on_submit():
        keywords = search_form.keywords.data
        activities = Enterprise.query.filter(
            or_(Enterprise.CoporateName.like('%' + keywords + '%'))).all()
        return render_template('admin/verify/company_verify.html',
                               search_form=search_form,
                               activities=activities)
    page = request.args.get('page', 1, type=int)
    user = Enterprise.query.filter(not_(Enterprise.user_id == '1'))
    pagination = user.filter(not_(Enterprise.CoporateName == 'None')).paginate(
        page,
        per_page=current_app.config['FLASY_NEWS_PER_PAGE'],
        error_out=False)
    users = pagination.items
    return render_template("admin/verify/company_verify.html",
                           activities=users,
                           search_form=search_form,
                           pagination=pagination)

def get_hosting_device_resources(self, context, id, complementary_id,
                                 tenant_id, mgmt_nw_id):
    ports = []
    mgmt_port = None
    # Ports for hosting device may not yet have 'device_id' set to
    # Nova assigned uuid of VM instance. However, those ports will still
    # have 'device_owner' attribute set to complementary_id. Hence, we
    # use both attributes in the query to ensure we find all ports.
    query = context.session.query(models_v2.Port)
    query = query.filter_by(tenant_id=tenant_id)
    query = query.filter(expr.or_(
        expr.and_(models_v2.Port.device_id != '',
                  models_v2.Port.device_id == id),
        models_v2.Port.device_owner == complementary_id))
    for port_db in query:
        if port_db.network_id != mgmt_nw_id:
            ports.append({'id': port_db.id})
        else:
            mgmt_port = {'id': port_db.id}
    return {'mgmt_port': mgmt_port, 'ports': ports}

def listener(self):
    messages = []

    # Get unread
    if getParam('init'):
        db = get_session()

        notifications = db.query(Notif) \
            .filter(or_(Notif.read == False,
                        Notif.added > (time.time() - 259200))) \
            .all()
        for n in notifications:
            ndict = n.to_dict()
            ndict['type'] = 'notification'
            messages.append(ndict)

    return jsonified({
        'success': True,
        'result': messages,
    })

def activity():
    search_form = SearchActivForm()
    if search_form.validate_on_submit():
        keywords = search_form.keywords.data
        activities = ActivReleased.query.filter(
            or_(ActivReleased.title.like('%' + keywords + '%'),
                ActivReleased.cost.like('%' + keywords + '%'))).all()
        return render_template('activity/activity_index.html',
                               search_form=search_form,
                               activities=activities)
    page = request.args.get('page', 1, type=int)
    pagination = ActivReleased.query.order_by(ActivReleased.id).paginate(
        page,
        per_page=current_app.config['FLASY_NEWS_PER_PAGE'],
        error_out=False)
    activ_released = pagination.items
    return render_template('activity/activity_index.html',
                           activities=activ_released,
                           search_form=search_form,
                           pagination=pagination)

def build_object_clause(self, obj, table):
    """
    Build Object clause.

    Capable of taking a list of objects as well, in which case
    subclauses are joined with 'OR'.
    """
    if isinstance(obj, REGEXTerm):
        # TODO: this works only in MySQL. Must adapt for Postgres and SQLite
        return table.c.object.op("REGEXP")(obj)
    elif isinstance(obj, list):
        return expression.or_(
            *[self.build_object_clause(o, table) for o in obj if o])
    elif isinstance(obj, (QuotedGraph, Graph)):
        return table.c.object == obj.identifier
    elif obj is not None:
        return table.c.object == obj
    else:
        return None

def fetch_all_by_ligand_id(self, ligand_id, *expr, **kwargs):
    """ """
    where = and_(LigandComponent.ligand_id == ligand_id, *expr)

    query = self.query.join(
        AromaticRing,
        or_(RingInteraction.aromatic_ring_bgn_id == AromaticRing.aromatic_ring_id,
            RingInteraction.aromatic_ring_end_id == AromaticRing.aromatic_ring_id))
    query = query.join(
        LigandComponent,
        LigandComponent.residue_id == AromaticRing.residue_id)
    query = query.filter(where)

    return query

def get_non_validated_entities(self, reference_time):
    """
    Get a list of all categories, portlets and algorithm groups that were
    not found valid since the reference_time. Used in the initializer on
    each start to filter out any entities that for some reason became
    invalid.

    :return: tuple (list of entities to get invalidated,
                    list of entities to be removed)
    """
    try:
        stored_adapters = self.session.query(model.Algorithm).filter(
            or_(model.Algorithm.last_introspection_check == None,
                model.Algorithm.last_introspection_check < reference_time)).all()
        categories = self.session.query(model.AlgorithmCategory).filter(
            model.AlgorithmCategory.last_introspection_check < reference_time).all()
        portlets = self.session.query(model.Portlet).filter(
            model.Portlet.last_introspection_check < reference_time).all()
        result = stored_adapters + categories, portlets
    except SQLAlchemyError, ex:
        self.logger.exception(ex)
        result = [], []
    # the docstring promises a tuple, so hand the computed result back
    return result
