def delete(self):
    """Delete ``self.obj``, emit a "delete" activity, then redirect.

    On an integrity error the transaction is rolled back and the user is
    sent back to the object's view with an error message.
    """
    current_session = db.session()
    current_session.delete(self.obj)
    activity.send(
        self,
        actor=g.user,
        verb="delete",
        object=self.obj,
        target=self.activity_target,
    )
    try:
        current_session.commit()
    except sa.exc.IntegrityError as exc:
        # let subclasses produce a custom response first
        response = self.handle_commit_exception(exc)
        if response is not None:
            return response
        current_session.rollback()
        logger.error(exc)
        flash(
            _("This entity is referenced by another object and cannot be deleted."),
            "error",
        )
        return self.redirect_to_view()
    else:
        flash(self.message_success(), "success")
        # FIXME: for DELETE verb response in case of success should be 200, 202
        # (accepted) or 204 (no content)
        return self.redirect_to_index()
def get_permissions_assignments(self, obj=None, permission=None):
    """Collect permission assignments grouped by permission.

    :param obj: optional :class:`Entity`; when given, only assignments
        attached to it are considered. A transient entity (no id yet) is
        treated as the global scope.
    :param permission: return only roles having this permission
    :returns: a dict where keys are `permissions` and values `roles` iterable.
    """
    session = None
    if obj is not None:
        assert isinstance(obj, Entity)
        session = object_session(obj)
        if obj.id is None:
            # not persisted yet: fall back to global assignments
            obj = None
    if session is None:
        session = db.session()
    query = session.query(
        PermissionAssignment.permission, PermissionAssignment.role
    ).filter(PermissionAssignment.object == obj)
    if permission:
        query = query.filter(PermissionAssignment.permission == permission)
    grouped = {}
    for perm, role in query.yield_per(1000):
        grouped.setdefault(perm, set()).add(role)
    return grouped
def do_delete(self):
    """Delete the selected tags after explicit user confirmation.

    Entities referencing the deleted tags are scheduled for reindexing.
    """
    form = request.form
    if not form.get("confirm_delete", False, type=bool):
        flash(_("Please fix the error(s) below"), "error")
        self.form_errors["confirm_delete"] = _(
            "Must be checked to ensure you intent to delete these tags"
        )
        return self.get(self.ns)
    db_session = db.session()
    selected = self._get_selected_tags()
    if not selected:
        flash(_("No action performed: no tags selected"), "warning")
        return self.redirect_to_view()
    # collect affected entities before the tags disappear
    entities_to_reindex = get_entities_for_reindex(selected)
    success_message = _n(
        "%(tag)s deleted",
        "%(num)d tags deleted:\n%(tags)s",
        len(selected),
        tag=selected[0].label,
        tags=", ".join(t.label for t in selected),
    )
    for tag in selected:
        db_session.delete(tag)
    db_session.commit()
    flash(success_message)
    schedule_entities_reindex(entities_to_reindex)
    return self.redirect_to_view()
def get_entities_for_reindex(tags):
    """Collect entities tagged with these tags.

    :param tags: a single ``Tag`` or an iterable of tags.
    :returns: a set of ``("changed", entity_type, entity_id, ())`` tuples
        suitable for ``schedule_entities_reindex``.
    """
    if isinstance(tags, Tag):
        tags = (tags,)
    session = db.session()
    indexing = get_service("indexing")
    tbl = Entity.__table__
    tag_ids = [t.id for t in tags]
    query = (
        sa.sql.select([tbl.c.entity_type, tbl.c.id])
        .select_from(tbl.join(entity_tag_tbl, entity_tag_tbl.c.entity_id == tbl.c.id))
        .where(entity_tag_tbl.c.tag_id.in_(tag_ids))
    )
    entities = set()
    with session.no_autoflush:
        for entity_type, entity_id in session.execute(query):
            if entity_type not in indexing.adapted:
                logger.debug("%r is not indexed, skipping", entity_type)
                # BUG FIX: the original logged "skipping" but fell through and
                # queued the entity anyway; actually skip non-indexed types.
                continue
            item = ("changed", entity_type, entity_id, ())
            entities.add(item)
    return entities
def finalize_validate():
    """Final step of the setup wizard: write config, create DB and admin.

    Reads all the values collected in the wizard session, writes the
    configuration files into the instance folder, then bootstraps the
    database and the admin account using a freshly-configured app.
    """
    config_file = Path(current_app.instance_path) / "config.py"
    logging_file = Path(current_app.instance_path) / "logging.yml"
    # the wizard must not overwrite an existing installation
    assert not config_file.exists()
    config = cmd_config.DefaultConfig(logging_file="logging.yml")
    config.SQLALCHEMY_DATABASE_URI = session_get("db")["uri"]
    # the same redis instance backs both the broker and the result backend
    redis_uri = session_get("redis")["uri"]
    config.REDIS_URI = redis_uri
    config.BROKER_URL = redis_uri
    config.CELERY_RESULT_BACKEND = redis_uri
    d = session_get("site_info")
    config.SITE_NAME = d["sitename"]
    config.MAIL_SENDER = d["mailsender"]
    is_production = d["server_mode"] == "production"
    config.PRODUCTION = is_production
    # debug toolbar and eager celery only outside production
    config.DEBUG = not is_production
    config.DEBUG_TB_ENABLED = config.DEBUG
    config.CELERY_ALWAYS_EAGER = not is_production
    cmd_config.write_config(config_file, config)
    cmd_config.maybe_write_logging(logging_file)
    admin_account = session_get("admin_account")
    # create a new app that will be configured with new config,
    # to create database and admin_user
    setup_app = unwrap(current_app)
    app = setup_app.__class__(
        setup_app.import_name,
        static_url_path=setup_app.static_url_path,
        static_folder=setup_app.static_folder,
        template_folder=setup_app.template_folder,
        instance_path=setup_app.instance_path,
    )
    with app.test_request_context("/setup/finalize"):
        app.create_db()
        db_session = db.session()
        admin = User(
            email=admin_account["email"],
            password=admin_account["password"],
            last_name=admin_account["name"],
            first_name=admin_account["firstname"],
            can_login=True,
        )
        db_session.add(admin)
        security = get_service("security")
        security.grant_role(admin, Admin)
        db_session.commit()
    # wizard data is no longer needed
    session_clear()
    return render_template(
        "setupwizard/done.html", config_file=config_file, logging_file=logging_file
    )
def safe_session():
    """Return a sqlalchemy session that can be safely used in a task.

    During standard async task processing, there is generally no problem.
    When developing with tasks run in eager mode, the regular scoped
    session is not usable when the task is called during an
    `after_commit` event, so a fresh session on the same bind is used.
    """
    if is_eager():
        return Session(bind=db.session.get_bind(None, None))
    return db.session()
def ensure_flushed(service, *args, **kwargs):
    """Flush pending security-model changes before delegating to ``fun``."""
    state = service.app_state
    if state.needs_db_flush:
        session = db.session()
        pending = (
            instance
            for bucket in (session.new, session.dirty, session.deleted)
            for instance in bucket
        )
        needs_flush = not session._flushing and any(
            isinstance(instance, (RoleAssignment, SecurityAudit))
            for instance in pending
        )
        if needs_flush:
            session.flush()
        state.needs_db_flush = False
    return fun(service, *args, **kwargs)
def prepare_args(self, args, kwargs):
    """Prepare view args while keeping ``self.obj`` out of the session.

    We must ensure that no flush() occurs and that obj is not registered
    in the session (to prevent accidental insert of an incomplete object).
    """
    session = db.session()
    with session.no_autoflush:
        args, kwargs = super().prepare_args(args, kwargs)
        try:
            session.expunge(self.obj)
        except sa.exc.InvalidRequestError:
            # obj was never attached to the session: nothing to do
            pass
    return args, kwargs
def delete_permission(self, permission, role, obj=None):
    """Remove the (permission, role) assignment, optionally scoped to ``obj``."""
    session = object_session(obj) if obj is not None else None
    if session is None:
        session = db.session()
    assignment = query_pa_no_flush(session, permission, role, obj)
    if not assignment:
        return
    session.delete(assignment)
    if obj:
        # this seems to be required with sqlalchemy > 0.9
        session.expire(obj, [PERMISSIONS_ATTR])
def add_permission(self, permission, role, obj=None):
    """Grant ``permission`` to ``role``, optionally scoped to ``obj``."""
    session = object_session(obj) if obj is not None else None
    if session is None:
        session = db.session()
    assignment = query_pa_no_flush(session, permission, role, obj)
    if not assignment:
        assignment = PermissionAssignment(permission=permission, role=role, object=obj)
    # add unconditionally: the assignment could have been found in
    # session.deleted and must be resurrected
    session.add(assignment)
def upload_new(folder):
    """Upload files (optionally uncompressing archives) into ``folder``.

    Archive entries recreate their directory structure under ``folder``;
    intermediate folders are created (and possibly renamed to avoid name
    clashes) as needed.
    """
    check_write_access(folder)
    session = db.session()
    base_folder = folder
    uncompress_files = "uncompress_files" in request.form
    fds = request.files.getlist("file")
    created_count = 0
    path_cache = {}  # mapping folder path in zip: folder instance
    for upload_fd in fds:
        for filepath, fd in explore_archive(upload_fd, uncompress=uncompress_files):
            folder = base_folder
            parts = []
            # traverse to final directory, create intermediate if necessary. Folders
            # may be renamed if a file already exists, path_cache is used to keep
            # track of this
            for subfolder_name in filepath:
                parts.append(subfolder_name)
                path = "/".join(parts)
                if path in path_cache:
                    # already resolved (and possibly renamed) earlier
                    folder = path_cache[path]
                    continue
                subfolders = {f.title: f for f in folder.subfolders}
                if subfolder_name in subfolders:
                    folder = subfolders[subfolder_name]
                    path_cache[path] = folder
                    continue
                # name clash with an existing document: pick a fresh name
                subfolder_name = get_new_filename(folder, subfolder_name)
                folder = folder.create_subfolder(subfolder_name)
                session.flush()
                path_cache[path] = folder
            create_document(folder, fd)
            created_count += 1
    flash(
        _n(
            "One new document successfully uploaded",
            "%(num)d new document successfully uploaded",
            num=created_count,
        ),
        "success",
    )
    session.commit()
    return redirect(url_for(folder))
def add_permission(
    self, permission: Permission, role: Role, obj: Optional[Model] = None
) -> None:
    """Grant ``permission`` to ``role``, optionally scoped to ``obj``."""
    session = object_session(obj) if obj is not None else None
    if session is None:
        session = db.session()
    assignment = query_pa_no_flush(session, permission, role, obj)
    if not assignment:
        assignment = PermissionAssignment(permission=permission, role=role, object=obj)
    # add unconditionally: the assignment could have been found in
    # session.deleted and must be resurrected
    session.add(assignment)
def form_valid(self, redirect_to=None):
    """Save object. Called when form is validated.

    :param redirect_to: real url (created with url_for) to redirect to,
        instead of the view by default.
    """
    session = db.session()
    # populate outside of autoflush so a half-filled object is never
    # accidentally written to the database
    with session.no_autoflush:
        self.before_populate_obj()
        self.form.populate_obj(self.obj)
        session.add(self.obj)
        self.after_populate_obj()
    try:
        session.flush()
        self.send_activity()
        session.commit()
    except ValidationError as e:
        # application-level validation failure
        rv = self.handle_commit_exception(e)
        if rv is not None:
            return rv
        session.rollback()
        flash(str(e), "error")
        return self.get()
    except sa.exc.IntegrityError as e:
        # database constraint violation (e.g. duplicate name)
        rv = self.handle_commit_exception(e)
        if rv is not None:
            return rv
        session.rollback()
        logger.error(e)
        flash(_("An entity with this name already exists in the system."), "error")
        return self.get()
    else:
        self.commit_success()
        flash(self.message_success(), "success")
        if redirect_to:
            return redirect(redirect_to)
        else:
            return self.redirect_to_view()
def do_merge(self):
    """Merge the selected tags into a single target tag.

    Retags all tagged entities to the target (dropping duplicates first),
    deletes the merged tags, and schedules reindexing of affected entities.
    """
    target_id = request.form.get("merge_to", type=int)
    if not target_id:
        flash(_("You must select a target tag to merge to"), "error")
        return self.get(self.ns)
    target = Tag.query.filter(Tag.ns == self.ns, Tag.id == target_id).scalar()
    if not target:
        flash(_("Target tag not found, no action performed"), "error")
        return self.get(self.ns)
    merge_from = set(self._get_selected_tags())
    if target in merge_from:
        # merging a tag into itself is a no-op
        merge_from.remove(target)
    if not merge_from:
        flash(_("No tag selected for merging"), "warning")
        return self.get(self.ns)
    session = db.session()
    merge_from_ids = [t.id for t in merge_from]
    tbl = entity_tag_tbl
    # collect affected entities before the source tags disappear
    entities_to_reindex = get_entities_for_reindex(merge_from)
    # entities already carrying the target tag: drop their source-tag rows
    # first, otherwise the UPDATE below would create duplicate links
    already_tagged = sa.sql.select([tbl.c.entity_id]).where(
        tbl.c.tag_id == target.id
    )
    del_dup = tbl.delete().where(
        sa.sql.and_(
            tbl.c.tag_id.in_(merge_from_ids), tbl.c.entity_id.in_(already_tagged)
        )
    )
    session.execute(del_dup)
    # repoint remaining links to the target tag
    update = (
        tbl.update()
        .where(tbl.c.tag_id.in_(merge_from_ids))
        .values(tag_id=target.id)
    )
    session.execute(update)
    for merged in merge_from:
        session.delete(merged)
    session.commit()
    schedule_entities_reindex(entities_to_reindex)
    return self.redirect_to_view()
def delete_permission(
    self, permission: Permission, role: Role, obj: Optional[Model] = None
) -> None:
    """Remove the (permission, role) assignment, optionally scoped to ``obj``."""
    session = object_session(obj) if obj is not None else None
    if session is None:
        session = db.session()
    assignment = query_pa_no_flush(session, permission, role, obj)
    if not assignment:
        return
    session.delete(assignment)
    if obj:
        # this seems to be required with sqlalchemy > 0.9
        session.expire(obj, [PERMISSIONS_ATTR])
def after_flush(self, session, flush_context):
    """Queue indexable objects touched by this flush for index update."""
    if not self.running:
        return
    if session is not db.session():
        # only track the main application session
        return
    queue = self.app_state.to_update
    buckets = (
        ("new", session.new),
        ("deleted", session.deleted),
        ("changed", session.dirty),
    )
    for op, instances in buckets:
        for instance in instances:
            adapter = self.adapted.get(fqcn(instance.__class__))
            if adapter is not None and adapter.indexable:
                queue.append((op, instance))
def _current_user_manager(self, session=None):
    """Return the current user, or SYSTEM user."""
    if session is None:
        session = db.session()
    try:
        user = g.user
    except Exception:
        # no request context / no authenticated user: fall back to SYSTEM (id 0)
        return session.query(User).get(0)
    if sa.orm.object_session(user) is session:
        return user
    # this can happen when called from a celery task during development
    # (with CELERY_ALWAYS_EAGER=True): the task SA session is not
    # app.db.session, and we should not attach this object to
    # the other session, because it can make weird, hard-to-debug
    # errors related to session.identity_map.
    return session.query(User).get(user.id)
def _current_user_manager(self, session=None):
    """Return the current user, or SYSTEM user.

    :param session: session the returned ``User`` must be bound to;
        defaults to the application session.
    """
    if session is None:
        session = db.session()
    try:
        user = g.user
    except Exception:
        # BUG FIX: was `except BaseException`, which also swallowed
        # SystemExit/KeyboardInterrupt. `Exception` suffices for a missing
        # request context and matches the twin implementation of this
        # method elsewhere in the codebase.
        return session.query(User).get(0)
    if sa.orm.object_session(user) is not session:
        # this can happen when called from a celery task during development
        # (with CELERY_ALWAYS_EAGER=True): the task SA session is not
        # app.db.session, and we should not attach this object to
        # the other session, because it can make weird, hard-to-debug
        # errors related to session.identity_map.
        return session.query(User).get(user.id)
    else:
        return user
def reindex_tree(obj):
    """Schedule reindexing `obj` and all of its descendants.

    Generally needed to update indexed security.

    :param obj: a ``CmisObject``; its whole subtree is queued for reindex.
    """
    assert isinstance(obj, CmisObject)
    index_service = get_service("indexing")
    if not index_service.running:
        return
    # recursive CTE walking the parent/child relation downwards from `obj`
    descendants = (
        sa.select([CmisObject.id, CmisObject._parent_id])
        .where(CmisObject._parent_id == sa.bindparam("ancestor_id"))
        .cte(name="descendants", recursive=True)
    )
    da = descendants.alias()
    CA = sa.orm.aliased(CmisObject)
    d_ids = sa.select([CA.id, CA._parent_id])
    descendants = descendants.union_all(d_ids.where(CA._parent_id == da.c.id))
    session = sa.orm.object_session(obj) or db.session()
    # including ancestor_id in entity_ids_q will garantee at least 1 value for the
    # "IN" predicate; otherwise when using sqlite (as during tests...)
    # 'ResourceClosedError' will be raised.
    #
    # as an added bonus, "obj" will also be in query results, thus will be added
    # in "to_update" without needing to do it apart.
    entity_ids_q = sa.union(
        sa.select([descendants.c.id]), sa.select([sa.bindparam("ancestor_id")])
    )
    query = (
        session.query(Entity)
        .filter(Entity.id.in_(entity_ids_q))
        .options(sa.orm.noload("*"))
        .params(ancestor_id=obj.id)
    )
    to_update = index_service.app_state.to_update
    key = "changed"
    for item in query.yield_per(1000):
        to_update.append((key, item))
def get_role_assignements(self, obj):
    """Return (principal, role) pairs for every assignment on ``obj``."""
    session = object_session(obj) if obj is not None else db.session
    if not session:
        session = db.session()
    assignments = (
        session.query(RoleAssignment)
        .filter(RoleAssignment.object == obj)
        .options(subqueryload('user.groups'))
        .all()
    )
    # principal is the anonymous role, the user, or (failing both) the group
    return [
        (AnonymousRole if ra.anonymous else (ra.user or ra.group), ra.role)
        for ra in assignments
    ]
def get(self):
    """Render the tags admin page with per-namespace tag/object counts."""
    tagged = Tag.__table__.join(entity_tag_tbl)
    obj_count = (
        sa.sql.select(
            [Tag.ns, func.count(entity_tag_tbl.c.entity_id).label("obj_count")]
        )
        .select_from(tagged)
        .group_by(Tag.ns)
        .alias()
    )
    joined = Tag.__table__.outerjoin(obj_count, Tag.ns == obj_count.c.ns)
    ns_query = (
        sa.sql.select(
            [Tag.ns, func.count(Tag.id).label("tag_count"), obj_count.c.obj_count],
            from_obj=[joined],
        )
        .group_by(Tag.ns, obj_count.c.obj_count)
        .order_by(Tag.ns)
    )
    namespaces = db.session().execute(ns_query)
    return render_template("admin/tags.html", namespaces=namespaces)
def get_role_assignements(self, obj):
    """Return (principal, role) pairs for every assignment on ``obj``."""
    session = object_session(obj) if obj is not None else db.session
    if not session:
        session = db.session()
    assignments = (
        session.query(RoleAssignment)
        .filter(RoleAssignment.object == obj)
        .options(subqueryload("user.groups"))
        .all()
    )
    results = []
    for ra in assignments:
        # principal is the anonymous role, the user, or (failing both) the group
        principal = AnonymousRole if ra.anonymous else (ra.user or ra.group)
        results.append((principal, ra.role))
    return results
def load_user(user_id: str) -> Optional[User]:
    """Flask-Login user loader: return the user for ``user_id`` or ``None``.

    ``None`` is returned when the user does not exist, is no longer allowed
    to log in, or when the lookup itself fails (after restoring the session).
    """
    try:
        user = User.query.get(user_id)
        if not user or not user.can_login:
            # if a user is edited and should not have access any more, this
            # will ensure they cannot continue if he had an active session
            return None
    except Exception:
        logger.warning("Error during login.", exc_info=True)
        session = db.session()
        if not session.is_active:
            # session is not usable, rollback should restore a usable
            # session
            session.rollback()
        return None
    app = unwrap(current_app)
    # notify the auth service and listeners that a user has been loaded
    app.services[AuthService.name].user_logged_in(app, user)
    user_loaded.send(app, user=user)
    return user
def after_commit(self, session: Session) -> None:
    """Any db updates go through here.

    We check if any of these models have ``__searchable__`` fields,
    indicating they need to be indexed. With these we update the whoosh
    index for the model. If no index exists, it will be created here;
    this could impose a penalty on the initial commit of a model.
    """
    if (
        not self.running
        # pyre-fixme[16]: `Optional` has no attribute `nested`.
        or session.transaction.nested  # inside a sub-transaction:
        # not yet written in DB
        or session is not db.session()
    ):
        # note: we have not tested too far if session is enclosed in a transaction
        # at connection level. For now it's not a standard use case, it would most
        # likely happens during tests (which don't do that for now)
        return
    primary_field = "id"
    state = self.app_state
    items = []
    for op, obj in state.to_update:
        model_name = fqcn(obj.__class__)
        if model_name not in self.adapted or not self.adapted[model_name].indexable:
            # safeguard
            continue
        # safeguard against DetachedInstanceError
        if sa.orm.object_session(obj) is not None:
            items.append((op, model_name, getattr(obj, primary_field), {}))
    if items:
        # hand the batch off to the async index-update task
        index_update.apply_async(kwargs={"index": "default", "items": items})
    self.clear_update_queue()
def load_user(user_id):
    """Flask-Login user loader: return the user for ``user_id`` or ``None``.

    ``None`` is returned when the user does not exist, is no longer allowed
    to log in, or when the lookup itself fails (after restoring the session).
    """
    try:
        user = User.query.get(user_id)
        if not user or not user.can_login:
            # if a user is edited and should not have access any more, this
            # will ensure they cannot continue if he had an active session
            return None
    except Exception:
        logger.warning("Error during login.", exc_info=True)
        session = db.session()
        if not session.is_active:
            # session is not usable, rollback should restore a usable
            # session
            session.rollback()
        return None
    app = unwrap(current_app)
    # notify the auth service and listeners that a user has been loaded
    app.services[AuthService.name].user_logged_in(app, user)
    user_loaded.send(app, user=user)
    return user
def get(self):
    """Render the tags admin page with per-namespace tag/object counts."""
    # objects per namespace, via the tag <-> entity association table
    obj_count = (
        sa.sql.select(
            [Tag.ns, func.count(entity_tag_tbl.c.entity_id).label("obj_count")]
        )
        .select_from(Tag.__table__.join(entity_tag_tbl))
        .group_by(Tag.ns)
        .alias()
    )
    outer = Tag.__table__.outerjoin(obj_count, Tag.ns == obj_count.c.ns)
    ns_query = (
        sa.sql.select(
            [Tag.ns, func.count(Tag.id).label("tag_count"), obj_count.c.obj_count],
            from_obj=[outer],
        )
        .group_by(Tag.ns, obj_count.c.obj_count)
        .order_by(Tag.ns)
    )
    namespaces = db.session().execute(ns_query)
    return render_template("admin/tags.html", namespaces=namespaces)
def reindex_tree(obj):
    """Schedule reindexing `obj` and all of its descendants.

    Generally needed to update indexed security.

    :param obj: a ``CmisObject``; its whole subtree is queued for reindex.
    """
    assert isinstance(obj, CmisObject)
    index_service = get_service("indexing")
    if not index_service.running:
        return
    # recursive CTE walking the parent/child relation downwards from `obj`
    descendants = (sa.select([
        CmisObject.id, CmisObject._parent_id
    ]).where(CmisObject._parent_id == sa.bindparam("ancestor_id")).cte(
        name="descendants", recursive=True))
    da = descendants.alias()
    CA = sa.orm.aliased(CmisObject)
    d_ids = sa.select([CA.id, CA._parent_id])
    descendants = descendants.union_all(d_ids.where(CA._parent_id == da.c.id))
    session = sa.orm.object_session(obj) or db.session()
    # including ancestor_id in entity_ids_q will garantee at least 1 value for the
    # "IN" predicate; otherwise when using sqlite (as during tests...)
    # 'ResourceClosedError' will be raised.
    #
    # as an added bonus, "obj" will also be in query results, thus will be added
    # in "to_update" without needing to do it apart.
    entity_ids_q = sa.union(sa.select([descendants.c.id]),
                            sa.select([sa.bindparam("ancestor_id")]))
    query = (session.query(Entity).filter(Entity.id.in_(entity_ids_q)).options(
        sa.orm.noload("*")).params(ancestor_id=obj.id))
    to_update = index_service.app_state.to_update
    key = "changed"
    for item in query.yield_per(1000):
        to_update.append((key, item))
def after_commit(self, session):
    """Any db updates go through here.

    We check if any of these models have ``__searchable__`` fields,
    indicating they need to be indexed. With these we update the whoosh
    index for the model. If no index exists, it will be created here;
    this could impose a penalty on the initial commit of a model.
    """
    if (
        not self.running
        or session.transaction.nested  # inside a sub-transaction:
        # not yet written in DB
        or session is not db.session()
    ):
        # note: we have not tested too far if session is enclosed in a transaction
        # at connection level. For now it's not a standard use case, it would most
        # likely happens during tests (which don't do that for now)
        return
    primary_field = "id"
    state = self.app_state
    items = []
    for op, obj in state.to_update:
        model_name = fqcn(obj.__class__)
        if model_name not in self.adapted or not self.adapted[model_name].indexable:
            # safeguard
            continue
        # safeguard against DetachedInstanceError
        if sa.orm.object_session(obj) is not None:
            items.append((op, model_name, getattr(obj, primary_field), {}))
    if items:
        # hand the batch off to the async index-update task
        index_update.apply_async(kwargs={"index": "default", "items": items})
    self.clear_update_queue()
def stats_since(dt):
    """Return activity statistics for the time window ending now.

    :param dt: a ``timedelta``; the window is ``[utcnow() - dt, utcnow()]``.
    :returns: dict with new members/documents/messages counts and the
        number of users active during the window.
    """
    after_date = datetime.utcnow() - dt
    session = db.session()
    counts_per_type = (
        session.query(
            AuditEntry.entity_type.label("type"),
            sa.func.count(AuditEntry.entity_type).label("count"),
        )
        .group_by(AuditEntry.entity_type)
        .filter(AuditEntry.happened_at > after_date)
        .filter(AuditEntry.type == CREATION)
        .all()
    )
    # tally creations for the entity classes we report on
    created = {"User": 0, "Document": 0, "Message": 0}
    for entity_type, count in counts_per_type:
        entity_class = entity_type.split(".")[-1]
        if entity_class in created:
            created[entity_class] = count
    active_users = (
        session.query(sa.func.count(User.id))
        .filter(User.last_active > after_date)
        .scalar()
    )
    return {
        "new_members": created["User"],
        "active_users": active_users,
        "new_documents": created["Document"],
        "new_messages": created["Message"],
    }
def retrieve(
    self, pk: int, _session: Optional[Session] = None, **data: Any
) -> Entity:
    """Fetch the ``model_class`` instance identified by ``pk``."""
    session = db.session() if _session is None else _session
    return session.query(self.model_class).get(pk)
def save_data(self, data):
    """Apply a batch of signed item updates (Excel import).

    @param data: list of (id(int), attr_sig, modified(dict))

    Each update carries a signature over its attribute list; items whose
    signature does not validate are skipped. Each item is written inside a
    savepoint so one failing item does not poison the others.

    :returns: dict with changed/created/skipped counters and an
        ``error_happened`` flag.
    """
    session = db.session()
    changed_items = 0
    skipped_items = 0
    created_items = 0
    error_happened = False
    ids = [update.id for update in data if update.id is not None]
    q = self.model_cls.query
    if ids:
        # pre-load all targeted rows in one query so the later q.get()
        # calls hit the identity map
        q.filter(self.model_cls.id.in_(ids)).all()
    attr_to_column = self.attr_to_column
    for item_update in data:
        is_new = item_update.id is None
        item = q.get(item_update.id) if not is_new else self.model_cls()
        # verify the attribute list was not tampered with client-side
        signed_attrs = "{}.{}".format(";".join(item_update.attrs), item_update.sig)
        if not self.signer.validate(signed_attrs):
            item_id = str(item_update.id) if not is_new else "new"
            logger.debug(
                'Validation failed, skipping item "%s"'
                "\n"
                'item_attrs="%s"'
                "\n"
                'attr_sig="%s"',
                item_id,
                repr(item_update.attrs),
                repr(item_update.sig),
            )
            skipped_items += 1
            continue
        try:
            # savepoint: roll back just this item on failure
            with session.begin_nested():
                for attr, value in item_update.data.items():
                    if attr not in item_update.attrs:
                        continue
                    col = attr_to_column[attr]
                    if hasattr(col, "type_") and col.type_ is not None:
                        value = col.type_(value)
                    value = col.deserialize(value)
                    import_val, current = col.data_for_import(item)
                    update = self._import_value(item, col, current, value)
                    # at this stage we don't expect anymore import errors
                    assert not update.error
                    if col.required and not update.value:
                        # FIXME: missing param
                        raise ExcelImportError("missing required")
                    if update:
                        self._set_obj_value(item, attr, update)
                db.session.add(item)
                # import "many relateds" values
                many_relateds = item_update.data.get("__many_related__", {})
                many_relateds_map = {
                    cs.related_attr: cs for cs in self.MANY_RELATED_CS
                }
                for rel_attr, updates in many_relateds.items():
                    cs = many_relateds_map.get(rel_attr)
                    if not cs:
                        logger.error(
                            'Many relateds: columns set for "%s" not found',
                            rel_attr,
                            extra={"stack": True},
                        )
                        continue
                    manager = cs.create_manager()
                    prop = sa.inspect(self.model_cls).attrs[cs.related_attr]
                    # FIXME: we assume relation is made with 1 attribute
                    prop = list(prop._reverse_property)[0]
                    prop_key = prop.key
                    del prop
                    rel_attr_to_col = self.get_attr_to_column(
                        cs, map_related_attr=True
                    )
                    for update in updates:
                        # build a new related object linked back to `item`
                        obj = cs.model_cls()
                        setattr(obj, prop_key, item)
                        for attr, value in update.data.items():
                            if attr not in update.attrs:
                                continue
                            col = rel_attr_to_col[attr]
                            if hasattr(col, "type_") and col.type_ is not None:
                                value = col.type_(value)
                            value = col.deserialize(value)
                            import_val, current = col.data_for_import(obj)
                            imported = manager._import_value(
                                obj, col, current, value
                            )
                            # at this stage we don't expect anymore import errors
                            assert not imported.error
                            if col.required and not imported.value:
                                # FIXME: missing param
                                raise ExcelImportError("missing required")
                            if imported:
                                manager._set_obj_value(obj, attr, imported)
                        db.session.add(obj)
        except Exception as e:
            # NOTE(review): only sa.exc.StatementError is swallowed and
            # counted as skipped; any other exception is re-raised after
            # logging — confirm this asymmetry is intended.
            if isinstance(e, sa.exc.StatementError):
                logger.error(
                    "Import error: %s%s\n%s",
                    text_type(e).encode("utf-8"),
                    e.statement,
                    pprint.pformat(e.params),
                    exc_info=True,
                )
            else:
                logger.error(
                    "Import error: %s", text_type(e).encode("utf-8"), exc_info=True
                )
                raise
            error_happened = True
            skipped_items += 1
            # skip this item
            continue
        else:
            if is_new:
                created_items += 1
            else:
                changed_items += 1
    if error_happened:
        # FIXME: include data for showing failed signatures, error during
        # attr conversion, etc
        logger.error("Excel import error", extra={"stack": True})
    db.session.commit()
    return dict(
        changed_items=changed_items,
        created_items=created_items,
        skipped_items=skipped_items,
        error_happened=error_happened,
    )
def retrieve(self, pk, _session=None, **data):
    """Fetch the ``Model`` instance identified by ``pk``."""
    session = db.session() if _session is None else _session
    return session.query(self.Model).get(pk)
def retrieve(self, pk, _session=None, **data):
    """Fetch the ``model_class`` instance identified by ``pk``."""
    session = db.session() if _session is None else _session
    return session.query(self.model_class).get(pk)
def post(self):
    """Move a vocabulary item up or down within its vocabulary.

    Expects form fields ``group``, ``Model``, optionally ``return_to``
    and either ``up`` or ``down`` carrying the item id.
    """
    data = request.form
    group = data.get("group", "").strip()
    Model = data.get("Model", "").strip()
    return_to = data.get("return_to")
    return_endpoint = ".vocabularies"
    return_args = {}
    if return_to not in (None, "group", "model"):
        # unknown value: ignore and return to the generic listing
        return_to = None

    def do_return():
        return redirect(url_for(return_endpoint, **return_args))

    if not Model:
        return do_return()
    if not group or group == "_":
        # default group
        group = None
    svc = get_service("vocabularies")
    Model = svc.get_vocabulary(name=Model, group=group)
    if not Model:
        return do_return()
    if return_to is not None:
        return_endpoint += "_" + return_to
        if return_to == "group":
            return_args["group"] = group or "_"
        elif return_to == "model":
            return_args["group"] = Model.Meta.group or "_"
            return_args["Model"] = Model.Meta.name
    # "up" swaps with the closest lower position, "down" with the closest higher
    if "up" in data:
        cmp_op = Model.position.__lt__
        cmp_order = Model.position.desc()
        object_id = int(data.get("up"))
    elif "down" in data:
        cmp_op = Model.position.__gt__
        cmp_order = Model.position.asc()
        object_id = int(data.get("down"))
    else:
        return do_return()
    session = db.session()
    query = Model.query.with_lockmode("update")
    item = query.get(object_id)
    other = query.filter(cmp_op(item.position)).order_by(cmp_order).first()
    if other is not None:
        # switch positions
        # we have to work around unique constraint on 'position', since we cannot
        # write new positions simultaneously
        # "-1" is added to avoid breaking when one position==0
        pos = other.position
        other.position = -item.position - 1
        item.position = -pos - 1
        session.flush()
        item.position = pos
        other.position = -other.position - 1
        session.commit()
    return do_return()
def has_permission(self, user, permission, obj=None, inherit=False, roles=None):
    """Tell whether `user` has `permission`, globally or on `obj`.

    :param obj: target object to check permissions.
    :param inherit: check with permission inheritance. By default, check
        only local roles.
    :param roles: additional valid role or iterable of roles having
        `permission`.
    """
    if not isinstance(permission, Permission):
        assert permission in PERMISSIONS
        permission = Permission(permission)
    user = unwrap(user)
    if not self.running:
        # security service disabled: allow everything
        return True
    session = None
    if obj is not None:
        session = object_session(obj)
    if session is None:
        session = db.session()
    # root always have any permission
    if isinstance(user, User) and user.id == 0:
        return True
    # valid roles
    # 1: from database (global assignments plus, if persistent, obj-local ones)
    pa_filter = PermissionAssignment.object == None
    if obj is not None and obj.id is not None:
        pa_filter |= PermissionAssignment.object == obj
    pa_filter &= PermissionAssignment.permission == permission
    valid_roles = session.query(PermissionAssignment.role).filter(pa_filter)
    valid_roles = {res[0] for res in valid_roles.yield_per(1000)}
    # complete with defaults
    valid_roles |= {Admin}  # always have all permissions
    valid_roles |= DEFAULT_PERMISSION_ROLE.get(permission, set())
    # FIXME: obj.__class__ could define default permisssion matrix too
    if roles is not None:
        if isinstance(roles, (Role,) + (str,)):
            roles = (roles,)
        for r in roles:
            valid_roles.add(Role(r))
    # FIXME: query permission_role: global and on object
    if AnonymousRole in valid_roles:
        return True
    if Authenticated in valid_roles and not user.is_anonymous:
        return True
    # first test global roles, then object local roles
    checked_objs = [None, obj]
    if inherit and obj is not None:
        # walk up the parent chain while security inheritance is enabled
        while obj.inherit_security and obj.parent is not None:
            obj = obj.parent
            checked_objs.append(obj)
    principals = [user] + list(user.groups)
    self._fill_role_cache_batch(principals)
    return any(
        self.has_role(principal, valid_roles, item)
        for principal in principals
        for item in checked_objs
    )
def has_permission(self, user, permission, obj=None, inherit=False, roles=None):
    """Tell whether `user` has `permission`, globally or on `obj`.

    :param obj: target object to check permissions.
    :param inherit: check with permission inheritance. By default, check
        only local roles.
    :param roles: additional valid role or iterable of roles having
        `permission`.
    """
    if not isinstance(permission, Permission):
        assert permission in PERMISSIONS
        permission = Permission(permission)
    user = unwrap(user)
    if not self.running:
        # security service disabled: allow everything
        return True
    session = None
    if obj is not None:
        session = object_session(obj)
    if session is None:
        session = db.session()
    # root always have any permission
    if isinstance(user, User) and user.id == 0:
        return True
    # valid roles
    # 1: from database (global assignments plus, if persistent, obj-local ones)
    pa_filter = PermissionAssignment.object == None
    if obj is not None and obj.id is not None:
        pa_filter |= PermissionAssignment.object == obj
    pa_filter &= PermissionAssignment.permission == permission
    valid_roles = session.query(PermissionAssignment.role).filter(pa_filter)
    valid_roles = {res[0] for res in valid_roles.yield_per(1000)}
    # complete with defaults
    valid_roles |= {Admin}  # always have all permissions
    valid_roles |= DEFAULT_PERMISSION_ROLE.get(permission, set())
    # FIXME: obj.__class__ could define default permisssion matrix too
    if roles is not None:
        if isinstance(roles, (Role,) + string_types):
            roles = (roles,)
        for r in roles:
            valid_roles.add(Role(r))
    # FIXME: query permission_role: global and on object
    if AnonymousRole in valid_roles:
        return True
    if Authenticated in valid_roles and not user.is_anonymous:
        return True
    # first test global roles, then object local roles
    checked_objs = [None, obj]
    if inherit and obj is not None:
        # walk up the parent chain while security inheritance is enabled
        while obj.inherit_security and obj.parent is not None:
            obj = obj.parent
            checked_objs.append(obj)
    principals = [user] + list(user.groups)
    self._fill_role_cache_batch(principals)
    return any(
        (
            self.has_role(principal, valid_roles, item)
            for principal in principals
            for item in checked_objs
        )
    )