def user_has_read_permission(cls, user: User):
    """Check if user can read the file (as SQL expression).

    Returns a SQLAlchemy boolean clause suitable for use in a filter:
    anonymous visitors see only public files, admins see everything,
    and ordinary users see public files plus the files they own.
    """
    if not user:
        # Anonymous access: only non-private files are visible.
        return not_(cls.is_private)
    if user.is_admin:
        # Admins pass unconditionally (always-true SQL clause).
        return and_(True)
    # Regular user: public files, or files owned by this user.
    return or_(not_(cls.is_private), cls.owner_id == user.id)
def getAll(projectId):
    """Return the user profiles that are NOT members of the given project.

    :param projectId: id of the project whose members are excluded
    :return: list of ``UserProfile`` rows for users outside the project
    """
    session = database.get_session()
    try:
        memberInProject = session.query(
            Member.UserId).filter(Member.ProjectId == projectId)
        # BUG FIX: the original returned a lazy Query object AFTER calling
        # session.close(); evaluation then happened against a closed session.
        # Materialize the results with .all() while the session is still open.
        members = session.query(UserProfile).filter(
            not_(UserProfile.UserId.in_(memberInProject))).all()
    finally:
        # Ensure the session is released even if the query raises.
        session.close()
    return members
def delete_unsatisfied(engine, relation, constraint, filter_constraint=None):
    """
    Delete rows that DO NOT satisfy the specified constraint.

    :param engine: SQLAlchemy engine to be used
    :param relation: relation to scan
    :param constraint: logical constraint in DNF
    :type constraint: list of lists of predicates. First list elems are joined
        by disjunction, inner lists - by conjunction
    :param filter_constraint: constraint for defining section of table to
        delete data from
    """
    # Rows to remove: those violating `constraint`, restricted to the
    # section selected by `filter_constraint`.
    whereclause = and_(
        not_(_to_bool_clause(constraint)),
        _to_bool_clause(filter_constraint),
    )
    stmt = delete(table(relation['name'])).where(whereclause)
    _execute(engine, stmt)
def get_archivable_members(session: Session) -> Sequence[ArchivableMemberInfo]: """Return all the users that qualify for being archived right now. Selected are those users - whose last membership in the member_group ended two weeks in the past, - excluding users who currently have the `do-not-archive` property. """ # see FunctionElement.over mem_ends_at = func.upper(Membership.active_during) window_args: dict[str, ClauseElement | Sequence[ClauseElement | str] | None] = { 'partition_by': User.id, 'order_by': nulls_last(mem_ends_at), } last_mem = (select( User.id.label('user_id'), func.last_value(Membership.id).over(**window_args, rows=(None, None)).label('mem_id'), func.last_value(mem_ends_at).over(**window_args, rows=(None, None)).label('mem_end'), ).select_from(User).distinct().join(Membership).join( Config, Config.member_group_id == Membership.group_id)).cte('last_mem') stmt = ( select( User, last_mem.c.mem_id, last_mem.c.mem_end, ).select_from(last_mem) # Join the granted `do-not-archive` property, if existent .join(CurrentProperty, and_(last_mem.c.user_id == CurrentProperty.user_id, CurrentProperty.property_name == 'do-not-archive', not_(CurrentProperty.denied)), isouter=True) # …and use that to filter out the `do-not-archive` occurrences. .filter(CurrentProperty.property_name.is_(None)).join( User, User.id == last_mem.c.user_id).filter( last_mem.c.mem_end < current_timestamp() - timedelta(days=14)).order_by(last_mem.c.mem_end).options( joinedload( User.hosts), # joinedload(User.current_memberships), joinedload(User.account, innerjoin=True), joinedload(User.room), joinedload(User.current_properties_maybe_denied))) return session.execute(stmt).all()
def _convert_predicate(p):
    """Convert a predicate dict into a SQLAlchemy boolean expression.

    :param p: dict with keys ``'attribute'`` (column name), ``'operation'``
        (comparison operator, optionally prefixed with ``'NOT '``) and
        ``'value'`` (scalar, or list for multi-valued operators).
    :return: SQLAlchemy clause; negated with ``not_`` for ``'NOT '`` ops.
    """
    operators = {
        '=': operator.eq,
        '<>': operator.ne,
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '<=': operator.le,
        'BETWEEN': between,
        'IN': lambda column_clause, *values: column_clause.in_(values),
        # BUG FIX: the original passed the whole tuple to .like(); the
        # pattern argument must be unpacked.
        'LIKE': lambda column_clause, *values: column_clause.like(*values),
    }
    operation = p['operation']
    # BUG FIX: lstrip('NOT ') strips *characters* (N/O/T/space), not the
    # literal prefix — detect and remove the 'NOT ' prefix explicitly.
    negated = operation.startswith('NOT ')
    op_str = operation[len('NOT '):] if negated else operation
    values = (p['value'], ) if not isinstance(p['value'], list) else p['value']
    binary_expr = operators[op_str](column(p['attribute']), *values)
    return not_(binary_expr) if negated else binary_expr
def _convert_predicate(p):
    """Convert a predicate dict into a SQLAlchemy boolean expression.

    :param p: dict with keys ``'attribute'`` (column name), ``'operation'``
        (comparison operator, optionally prefixed with ``'NOT '``) and
        ``'value'`` (scalar, or list for multi-valued operators).
    :return: SQLAlchemy clause; negated with ``not_`` for ``'NOT '`` ops.
    """
    operators = {
        '=': operator.eq,
        '<>': operator.ne,
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '<=': operator.le,
        'BETWEEN': between,
        'IN': lambda column_clause, *values: column_clause.in_(values),
        # BUG FIX: the original passed the whole tuple to .like(); the
        # pattern argument must be unpacked.
        'LIKE': lambda column_clause, *values: column_clause.like(*values),
    }
    operation = p['operation']
    # BUG FIX: lstrip('NOT ') strips *characters* (N/O/T/space), not the
    # literal prefix — detect and remove the 'NOT ' prefix explicitly.
    negated = operation.startswith('NOT ')
    op_str = operation[len('NOT '):] if negated else operation
    values = (p['value'],) if not isinstance(p['value'], list) else p['value']
    binary_expr = operators[op_str](column(p['attribute']), *values)
    return not_(binary_expr) if negated else binary_expr
def get_job_candidates(job_id):
    """
    # 2.7 Get the liked candidates for a job (job_id) sorted by time of like
    for each candidate get notes if exist
    If the job is closed, return 0 candidates.
    :param job_id: int, job is
    :return: (payload dict, HTTP status) tuple
    """
    # TODO get Opinion and Note one query
    # get liked opinion, open job
    liked_candidates = db.session.query(JobCandidateOpinion)\
        .join(JobCandidate)\
        .join(Job)\
        .filter(not_(Job.status == "close"),
                JobCandidate.job_id == job_id,
                JobCandidateOpinion.opinion == "like")\
        .order_by(JobCandidateOpinion.created_on.asc()).all()

    if not liked_candidates:
        return {"results": []}, 200

    # prepare results dict, keyed by candidate id
    candidate_results = {}
    job_candidate_likes_ids = []
    for candidate in liked_candidates:
        job_candidate_likes_ids.append(candidate.job_candidate)
        candidate_results[candidate.JobCandidate.candidate_id] = {
            "candidate_id": candidate.JobCandidate.candidate_id,
            "title": candidate.JobCandidate.Candidate.title,
            "note": [],
        }

    # get note on job_candidate with likes
    candidates_notes = db.session.query(JobCandidateNote)\
        .join(JobCandidate)\
        .filter(JobCandidate.id.in_(job_candidate_likes_ids)).all()

    # add notes to results — IDIOM FIX: the original abused a list
    # comprehension for its side effects; use a plain loop instead.
    for candidate_note in candidates_notes:
        candidate_results[candidate_note.JobCandidate.candidate_id]["note"]\
            .append(candidate_note.note)

    return {"results": {"status": "success",
                        "candidates": candidate_results}}, 200
def gather_backups(backup_dir=BACKUP_DIR):
    """Scan *backup_dir*, persist every discovered backup, and delete DB rows
    for this host that no longer correspond to a backup on disk."""
    print('Starting gathering backups')
    # One parsed BackupCollection per sub-directory of backup_dir.
    backup_collections = []
    for container in os.listdir(backup_dir):
        if os.path.isdir(os.path.join(backup_dir, container)):
            collection = BackupCollection(container)
            collection.parse()
            backup_collections.append(collection)
    # Kick off one save() coroutine per backup and run them all concurrently;
    # each save yields the id of the persisted row.
    request = [
        backup.save()
        for collection in backup_collections
        for backup in collection.backups
    ]
    loop = asyncio.get_event_loop()
    host_ids = loop.run_until_complete(asyncio.gather(*request))
    loop.close()
    # Any row for this host whose id was not just (re)saved is stale.
    with session_scope() as session:
        outdated = session.query(Backup).filter_by(host=HOST).filter(
            not_(Backup.id.in_(host_ids)))
        print('Outdated', outdated.count())
        outdated.delete(synchronize_session='fetch')
        session.commit()
def _and_in(condition1, field2, in_ids):
    """AND *condition1* with ``field2 NOT IN in_ids``.

    When *in_ids* is empty the exclusion is a no-op, so *condition1* is
    returned unchanged.
    """
    if in_ids:
        return and_(condition1, not_(field2.in_(in_ids)))
    return condition1
def from_dict(self, **kwargs):
    """Update model with dictionary.

    Applies *kwargs* to this model's columns, relationships and settable
    properties, honouring read-only/hidden field lists unless ``_force=True``
    is passed, and returns a changelog dict:
    ``{field: {"old": ..., "new": ...}}`` for scalars, and lists of nested
    changelogs (with ``"id"`` / ``"deleted"`` markers) for list relationships.
    """
    # `_force=True` bypasses the read-only protection below.
    _force = kwargs.pop("_force", False)
    # Fields that must not be mass-assigned: model-declared read-only and
    # hidden fields plus the bookkeeping columns.
    readonly = self._readonly_fields if hasattr(
        self, "_readonly_fields") else []
    if hasattr(self, "_hidden_fields"):
        readonly += self._hidden_fields
    readonly += ["id", "created_at", "modified_at"]
    columns = self.__table__.columns.keys()
    relationships = self.__mapper__.relationships.keys()
    properties = dir(self)
    changes = {}
    # --- plain columns -----------------------------------------------------
    for key in columns:
        if key.startswith("_"):
            continue
        allowed = True if _force or key not in readonly else False
        exists = True if key in kwargs else False
        if allowed and exists:
            val = getattr(self, key)
            # Record and apply only actual value changes.
            if val != kwargs[key]:
                changes[key] = {"old": val, "new": kwargs[key]}
                setattr(self, key, kwargs[key])
    # --- relationships -----------------------------------------------------
    for rel in relationships:
        if rel.startswith('_'):
            continue
        allowed = True if _force or rel not in readonly else False
        exists = True if rel in kwargs else False
        if allowed and exists:
            is_list = self.__mapper__.relationships[rel].uselist
            if is_list:
                # Sync a list relationship: update matching children by id,
                # create children without a (known) id, then delete children
                # absent from the payload.
                valid_ids = []
                query = getattr(self, rel)
                cls = self.__mapper__.relationships[rel].argument()
                for item in kwargs[rel]:
                    if ("id" in item and query.filter_by(
                            id=item["id"]).limit(1).count() == 1):
                        # Existing child: recurse and collect its changelog.
                        obj = cls.query.filter_by(id=item["id"]).first()
                        col_changes = obj.from_dict(**item)
                        if col_changes:
                            col_changes["id"] = str(item["id"])
                            if rel in changes:
                                changes[rel].append(col_changes)
                            else:
                                changes.update({rel: [col_changes]})
                        valid_ids.append(str(item["id"]))
                    else:
                        # New child: create, populate, and flush so the
                        # generated id is available for the changelog.
                        col = cls()
                        col_changes = col.from_dict(**item)
                        query.append(col)
                        session.flush()
                        if col_changes:
                            col_changes["id"] = str(col.id)
                            if rel in changes:
                                changes[rel].append(col_changes)
                            else:
                                changes.update({rel: [col_changes]})
                        valid_ids.append(str(col.id))
                # delete rows from relationship not in kwargs[rel]
                for item in query.filter(not_(
                        cls.id.in_(valid_ids))).all():
                    col_changes = {"id": str(item.id), "deleted": True}
                    if rel in changes:
                        changes[rel].append(col_changes)
                    else:
                        changes.update({rel: [col_changes]})
                    session.delete(item)
            else:
                # Scalar relationship.
                val = getattr(self, rel)
                if self.__mapper__.relationships[
                        rel].query_class is not None:
                    # Mapped object: recurse into the related model.
                    if val is not None:
                        col_changes = val.from_dict(**kwargs[rel])
                        if col_changes:
                            changes.update({rel: col_changes})
                else:
                    # Plain value: assign directly when changed.
                    if val != kwargs[rel]:
                        setattr(self, rel, kwargs[rel])
                        changes[rel] = {"old": val, "new": kwargs[rel]}
    # --- python properties (setters only) ----------------------------------
    for key in list(set(properties) - set(columns) - set(relationships)):
        if key.startswith("_"):
            continue
        allowed = True if _force or key not in readonly else False
        exists = True if key in kwargs else False
        # Only properties with a setter can be assigned.
        if allowed and exists \
                and getattr(self.__class__, key).fset is not None:
            val = getattr(self, key)
            if hasattr(val, "to_dict"):
                val = val.to_dict()
            changes[key] = {"old": val, "new": kwargs[key]}
            setattr(self, key, kwargs[key])
    return changes
def not_(self, expression: Any) -> Union[BooleanClauseList]:
    """Negate *expression*; only the SQLAlchemy backend implements this."""
    if not self._is_sqlalchemy:
        raise NotImplementedError()
    # Delegate to SQLAlchemy's global not_() operator.
    return not_(expression)
def download_candidates(self):
    """Query for ReleaseMonitor rows still eligible for download: not yet
    downloaded, not nuked, not archived, and not currently downloading."""
    unavailable = or_(ReleaseMonitor.downloaded,
                      ReleaseMonitor.nuked,
                      ReleaseMonitor.archived,
                      ReleaseMonitor.downloading)
    return self._db.query(ReleaseMonitor).filter(not_(unavailable))
def make_metric_agg_n_wide(self):
    """Pivot metric_aggregations_n ("long") rows into one wide row per
    aggregation id, with one ``val_<i>`` column per bias-dimension property
    (project-property values are resolved to ``project.code``).

    :return: SQLAlchemy subquery aliased as ``man_wide``.
    """
    # its actually faster than joining on j
    try:
        project_pos = self.bias_dimension_properties_pids.index(
            Properties.PROJECT.value)
    except ValueError:
        project_pos = None
    mans = [
        aliased(metric_aggregations_n, name='n' * (i + 1))
        for i in range(len(self.bias_dimension_properties_pids))
    ]
    # this extra table allows us to filter out aggregation ids that match
    # because they include the same elements at the smaller but have more
    # properties and values
    man_extra = aliased(metric_aggregations_n, name='extra')
    assert len(mans) >= 1, 'should be at least lenght 1'
    # query_cols = [self._make_agg_n_wide_project_case_statement(man_i).label(f'val_{i}') for i, man_i in enumerate(mans)]
    # add in case statement if known to be sitelink
    # BUG FIX: the original condition `project_pos and project_pos == i` was
    # falsy when the project property sits at index 0 (0 is a valid index);
    # comparing directly is safe since None == i is always False.
    query_cols = [
        project.code.label(f'val_{i}')
        if project_pos == i else man_i.value.label(f'val_{i}')
        for i, man_i in enumerate(mans)
    ]  # add in case statement if known to be sitelink
    man_wide_q = self.db_session.query(mans[0].id, *query_cols) \
        .select_from(mans[0])
    # Chain-join each alias to the previous one on id + aggregation order +
    # expected property, pivoting one property per alias.
    for i, man_i in enumerate(mans):
        if i == 0:
            continue
        man_wide_q = man_wide_q.join(
            man_i,
            and_(
                mans[i - 1].id == man_i.id,
                mans[i - 1].aggregation_order == i - 1,
                mans[i - 1].property ==
                self.bias_dimension_properties_pids[i - 1],
                man_i.aggregation_order == i,
                man_i.property == self.bias_dimension_properties_pids[i]))
    # Anti-join: exclude aggregation ids that carry extra properties beyond
    # the configured bias dimensions.
    man_wide_q = man_wide_q.join(
        man_extra,
        and_(
            mans[0].id == man_extra.id,
            mans[0].aggregation_order == 0,
            man_extra.aggregation_order == len(
                self.bias_dimension_properties_pids),
            mans[0].property == self.bias_dimension_properties_pids[0],
            not_(
                man_extra.property.in_(
                    self.bias_dimension_properties_pids))),
        isouter=True)
    man_wide_q = man_wide_q.filter(man_extra.id.is_(None))
    # BUG FIX: `if project_pos:` skipped this join when project_pos == 0;
    # test explicitly against None.
    if project_pos is not None:
        man_wide_q = man_wide_q.join(project,
                                     mans[project_pos].value == project.id,
                                     isouter=True)
    for i, man_i in enumerate(mans):
        man_wide_q = man_wide_q.filter(man_i.aggregation_order == i)
    # Compiled SQL kept around for debugging (see commented log line).
    man_wide_sql = man_wide_q.statement.compile(
        compile_kwargs={"literal_binds": True})
    # log.debug(f'man wide sql is: {man_wide_sql}')
    return man_wide_q.subquery('man_wide')
def getAll(projectId):
    """Return the user profiles that are NOT members of the given project.

    :param projectId: id of the project whose members are excluded
    :return: list of ``UserProfile`` rows for users outside the project
    """
    session = database.get_session()
    try:
        memberInProject = session.query(Member.UserId).filter(
            Member.ProjectId == projectId)
        # BUG FIX: the original returned a lazy Query object AFTER calling
        # session.close(); evaluation then happened against a closed session.
        # Materialize the results with .all() while the session is still open.
        members = session.query(UserProfile).filter(
            not_(UserProfile.UserId.in_(memberInProject))).all()
    finally:
        # Ensure the session is released even if the query raises.
        session.close()
    return members
def member_candidate(project_id):
    """Return enabled user profiles that are not yet members of the project.

    :param project_id: id of the project whose members are excluded
    :return: list of enabled ``UserProfile`` rows outside the project
    """
    session = database.get_session()
    try:
        projectMember = session.query(Member.UserId).filter(
            Member.ProjectId == project_id)
        # BUG FIX: the original returned a lazy Query object AFTER calling
        # session.close(); evaluation then happened against a closed session.
        # Materialize the results with .all() while the session is still open.
        candidate = session.query(UserProfile).filter(
            UserProfile.Status == UserStatus.Enabled,
            not_(UserProfile.UserId.in_(projectMember))).all()
    finally:
        # Ensure the session is released even if the query raises.
        session.close()
    return candidate