def user_is_flamenco_user(self, user_id: bson.ObjectId) -> bool:
    """Returns True iff the user has the Flamenco User role."""
    from pillar import current_app
    from pillar.auth import UserClass

    assert isinstance(user_id, bson.ObjectId)

    # TODO: move role/cap checking code to Pillar.
    users_coll = current_app.db('users')
    db_user = users_coll.find_one({'_id': user_id}, {'roles': 1})
    if not db_user:
        self._log.debug('user_is_flamenco_user: User %s not found', user_id)
        return False

    user = UserClass.construct('', db_user)
    return user.has_cap('flamenco-use')
def attach_task_log(manager_id: ObjectId, _, task_id: str):
    """Store the POSTed task log as a file in the storage backend.

    Also updates the task itself to have a reference to the file.
    """
    # We only want to deal with GZipped files.
    if 'logfile' not in request.files:
        raise wz_exceptions.BadRequest("Missing uploaded file named 'logfile'")
    uploaded_file: werkzeug.datastructures.FileStorage = request.files['logfile']
    if not uploaded_file.filename.endswith('.gz'):
        # The test HTTP client doesn't support setting per-part headers.
        raise wz_exceptions.BadRequest('GZIP your file!')

    # De-queue now; if the task or project doesn't exist, the Manager shouldn't be asked again.
    task_oid = str2id(task_id)
    current_flamenco.manager_manager.dequeue_task_log_request(manager_id, task_oid)

    # Check whether this Manager may attach to this Task.
    tasks_coll = current_flamenco.db('tasks')
    task = tasks_coll.find_one({'_id': task_oid, 'manager': manager_id})
    if not task:
        raise wz_exceptions.NotFound('No such task exists')

    proj_coll = current_app.db('projects')
    project = proj_coll.find_one({'_id': task['project'], '_deleted': {'$ne': True}},
                                 projection={'url': True})
    if not project:
        log.warning('attach_task_log(%s, %s): project %s does not exist!',
                    manager_id, task_id, task['project'])
        raise wz_exceptions.NotFound(f'Project for task {task_oid} does not exist')

    preexisting = current_flamenco.task_manager.api_attach_log(task, uploaded_file)

    resp = jsonify({'_message': 'ok'}, status=200 if preexisting else 201)
    resp.headers['Location'] = url_for(
        'flamenco.tasks.perproject.download_task_log_file',
        project_url=project['url'], task_id=task_id)
    return resp
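# Example sketch (not part of the codebase): how a Flamenco Manager might POST
# a gzipped task log to the endpoint above. The URL path and token-as-username
# auth are assumptions made for illustration; only the multipart field name
# 'logfile' and the '.gz' filename requirement come from attach_task_log().
import gzip
import io

import requests


def upload_task_log(base_url: str, auth_token: str, task_id: str, log_text: str):
    # GZip the log in memory; the endpoint rejects non-.gz uploads.
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as gz:
        gz.write(log_text.encode('utf-8'))
    buf.seek(0)

    resp = requests.post(
        f'{base_url}/flamenco/managers/attach-task-log/{task_id}',  # assumed route
        files={'logfile': (f'{task_id}.log.gz', buf, 'application/gzip')},
        auth=(auth_token, ''),  # assumed authentication scheme
    )
    resp.raise_for_status()
    # 201 Created on first upload, 200 OK when a log already existed;
    # the Location header points at the download URL.
    return resp.headers.get('Location')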
def get_project_id(self):
    nodes_coll = current_app.db('nodes')
    result = nodes_coll.find_one({'_id': self.node_id})
    return result['project']
def upgrade_attachment_usage(proj_url=None, all_projects=False, go=False):
    """Replaces '@[slug]' with '{attachment slug}'.

    Also moves links from the attachment dict to the attachment shortcode.
    """
    if bool(proj_url) == all_projects:
        log.error('Use either --project or --all.')
        return 1

    import html
    from pillar.api.projects.utils import node_type_dict
    from pillar.api.utils import remove_private_keys
    from pillar.api.utils.authentication import force_cli_user

    force_cli_user()

    nodes_coll = current_app.db('nodes')
    total_nodes = 0
    failed_node_ids = set()

    # Use a mixture of the old slug RE that still allows spaces in the slug
    # name and the new RE that allows dashes.
    old_slug_re = re.compile(r'@\[([a-zA-Z0-9_\- ]+)\]')
    for proj in _db_projects(proj_url, all_projects, go=go):
        proj_id = proj['_id']
        proj_url = proj.get('url', '-no-url-')
        nodes = nodes_coll.find({
            '_deleted': {'$ne': True},
            'project': proj_id,
            'properties.attachments': {'$exists': True},
        })
        node_count = nodes.count()
        if node_count == 0:
            log.debug('Skipping project %s (%s)', proj_url, proj_id)
            continue

        proj_node_types = node_type_dict(proj)

        for node in nodes:
            attachments = node['properties']['attachments']
            replaced = False

            # Inner functions because of access to the node's attachments.
            def replace(match):
                nonlocal replaced
                slug = match.group(1)
                log.debug('    - OLD STYLE attachment slug %r', slug)
                try:
                    att = attachments[slug]
                except KeyError:
                    log.info("Attachment %r not found for node %s", slug, node['_id'])
                    link = ''
                else:
                    link = att.get('link', '')
                    if link == 'self':
                        link = " link='self'"
                    elif link == 'custom':
                        url = att.get('link_custom')
                        if url:
                            link = " link='%s'" % html.escape(url)
                replaced = True
                return '{attachment %r%s}' % (slug.replace(' ', '-'), link)

            def update_markdown(value: str) -> str:
                return old_slug_re.sub(replace, value)

            iter_markdown(proj_node_types, node, update_markdown)

            # Remove no longer used properties from attachments
            new_attachments = {}
            for slug, attachment in attachments.items():
                replaced |= 'link' in attachment  # link_custom implies link
                attachment.pop('link', None)
                attachment.pop('link_custom', None)
                new_attachments[slug.replace(' ', '-')] = attachment
            node['properties']['attachments'] = new_attachments

            if replaced:
                total_nodes += 1
            else:
                # Nothing got replaced
                continue

            if go:
                # Use Eve to PUT, so we have schema checking.
                db_node = remove_private_keys(node)
                r, _, _, status = current_app.put_internal('nodes', db_node, _id=node['_id'])
                if status != 200:
                    log.error('Error %i storing altered node %s %s', status, node['_id'], r)
                    failed_node_ids.add(node['_id'])
                    # raise SystemExit('Error storing node; see log.')
                log.debug('Updated node %s: %s', node['_id'], r)

        log.info('Project %s (%s) has %d nodes with attachments',
                 proj_url, proj_id, node_count)
    log.info('%s %d nodes', 'Updated' if go else 'Would update', total_nodes)
    if failed_node_ids:
        log.warning('Failed to update %d of %d nodes: %s',
                    len(failed_node_ids), total_nodes,
                    ', '.join(str(nid) for nid in failed_node_ids))
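# A minimal standalone sketch of the '@[slug]' → '{attachment slug}' rewrite
# performed above, using the same regular expression so it can be tried
# without a database. The attachment dict shape and the html.escape() of
# custom links mirror the inner replace() function.
import html
import re

OLD_SLUG_RE = re.compile(r'@\[([a-zA-Z0-9_\- ]+)\]')


def rewrite_old_shortcodes(markdown: str, attachments: dict) -> str:
    def replace(match: re.Match) -> str:
        slug = match.group(1)
        att = attachments.get(slug, {})
        link = att.get('link', '')
        if link == 'self':
            link = " link='self'"
        elif link == 'custom' and att.get('link_custom'):
            link = " link='%s'" % html.escape(att['link_custom'])
        return '{attachment %r%s}' % (slug.replace(' ', '-'), link)

    return OLD_SLUG_RE.sub(replace, markdown)


# Example: spaces in old-style slugs become dashes in the new shortcode.
assert rewrite_old_shortcodes('see @[my image]', {'my image': {}}) \
    == "see {attachment 'my-image'}"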
def upgrade_attachment_schema(proj_url=None, all_projects=False, go=False):
    """Replaces the project's attachments with the new schema.

    Updates both the schema definition and the nodes with attachments
    (asset, page, post).
    """
    if bool(proj_url) == all_projects:
        log.error('Use either --project or --all.')
        return 1

    from pillar.api.utils.authentication import force_cli_user
    force_cli_user()

    from pillar.api.node_types.asset import node_type_asset
    from pillar.api.node_types.page import node_type_page
    from pillar.api.node_types.post import node_type_post
    from pillar.api.utils import remove_private_keys, doc_diff

    # Node types that support attachments
    node_types = (node_type_asset, node_type_page, node_type_post)
    nts_by_name = {nt['name']: nt for nt in node_types}

    nodes_coll = current_app.db('nodes')

    def replace_schemas(project):
        project_url = project.get('url', '-no-url-')
        log_proj = _single_logger('Upgrading schema project %s (%s)',
                                  project_url, project['_id'])

        orig_proj = copy.deepcopy(project)
        for proj_nt in project['node_types']:
            nt_name = proj_nt['name']
            if nt_name not in nts_by_name:
                continue

            pillar_nt = nts_by_name[nt_name]
            pillar_dyn_schema = pillar_nt['dyn_schema']
            if proj_nt['dyn_schema'] == pillar_dyn_schema:
                # Schema already up to date.
                continue

            log_proj()
            log.info('   - replacing dyn_schema on node type "%s"', nt_name)
            proj_nt['dyn_schema'] = copy.deepcopy(pillar_dyn_schema)

        seen_changes = False
        for key, val1, val2 in doc_diff(orig_proj, project):
            if not seen_changes:
                log.info('Schema changes to project %s (%s):', project_url, project['_id'])
                seen_changes = True
            log.info('    - %30s: %s → %s', key, val1, val2)

        if go:
            # Use Eve to PUT, so we have schema checking.
            db_proj = remove_private_keys(project)
            r, _, _, status = current_app.put_internal('projects', db_proj, _id=project['_id'])
            if status != 200:
                log.error('Error %i storing altered project %s %s', status, project['_id'], r)
                raise SystemExit('Error storing project, see log.')
            log.debug('Project saved successfully.')

    def replace_attachments(project):
        project_url = project.get('url', '-no-url-')
        log_proj = _single_logger('Upgrading nodes for project %s (%s)',
                                  project_url, project['_id'])

        # Remove empty attachments
        if go:
            res = nodes_coll.update_many(
                {'properties.attachments': {}, 'project': project['_id']},
                {'$unset': {'properties.attachments': 1}},
            )
            if res.matched_count > 0:
                log_proj()
                log.info('Removed %d empty attachment dicts', res.modified_count)
        else:
            to_remove = nodes_coll.count({'properties.attachments': {},
                                          'project': project['_id']})
            if to_remove:
                log_proj()
                log.info('Would remove %d empty attachment dicts', to_remove)

        # Convert attachments.
        nodes = nodes_coll.find({
            '_deleted': False,
            'project': project['_id'],
            'node_type': {'$in': list(nts_by_name)},
            'properties.attachments': {'$exists': True},
        })
        for node in nodes:
            attachments = node['properties']['attachments']
            if not attachments:
                # If we're not modifying the database (e.g. go=False),
                # any attachments={} will not be filtered out earlier.
                if go or attachments != {}:
                    log_proj()
                    log.info('    - Node %s (%s) still has empty attachments %r',
                             node['_id'], node.get('name'), attachments)
                continue
            if isinstance(attachments, dict):
                # This node has already been upgraded.
                continue

            # Upgrade from list [{'slug': 'xxx', 'oid': 'yyy'}, ...]
            # to dict {'xxx': {'oid': 'yyy'}, ...}
            log_proj()
            log.info('    - Updating schema on node %s (%s)', node['_id'], node.get('name'))
            new_atts = {}
            for field_info in attachments:
                for attachment in field_info.get('files', []):
                    new_atts[attachment['slug']] = {'oid': attachment['file']}

            node['properties']['attachments'] = new_atts
            log.info('      from %s to %s', attachments, new_atts)

            if go:
                # Use Eve to PUT, so we have schema checking.
                db_node = remove_private_keys(node)
                r, _, _, status = current_app.put_internal('nodes', db_node, _id=node['_id'])
                if status != 200:
                    log.error('Error %i storing altered node %s %s', status, node['_id'], r)
                    raise SystemExit('Error storing node; see log.')

    for proj in _db_projects(proj_url, all_projects, go=go):
        replace_schemas(proj)
        replace_attachments(proj)
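# A self-contained sketch of the attachment-schema conversion performed in
# replace_attachments() above: the old list-of-fields layout becomes a dict
# keyed by slug. The sample data shapes follow the comment in the code; the
# ObjectId strings are placeholders.
def convert_attachments(attachments):
    """Convert [{'files': [{'slug': 'xxx', 'file': 'yyy'}, ...]}, ...]
    to {'xxx': {'oid': 'yyy'}, ...}; a dict is already upgraded."""
    if isinstance(attachments, dict):
        return attachments
    new_atts = {}
    for field_info in attachments:
        for attachment in field_info.get('files', []):
            new_atts[attachment['slug']] = {'oid': attachment['file']}
    return new_atts


old_style = [{'files': [{'slug': 'header', 'file': 'oid-1'},
                        {'slug': 'diagram', 'file': 'oid-2'}]}]
assert convert_attachments(old_style) == {
    'header': {'oid': 'oid-1'},
    'diagram': {'oid': 'oid-2'},
}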
def patch_edit_from_web(self, org_id: bson.ObjectId, patch: dict):
    """Updates Organization fields from the web.

    The PATCH command supports the following payload. The 'name' field must
    be set, all other fields are optional. When an optional field is omitted
    it will be handled as an instruction to clear that field.

    {'name': str,
     'description': str,
     'website': str,
     'location': str,
     'ip_ranges': list of human-readable IP ranges}
    """
    from pymongo.results import UpdateResult
    from . import ip_ranges

    self._assert_is_admin(org_id)
    user = current_user()
    current_user_id = user.user_id

    # Only take known fields from the patch, don't just copy everything.
    update = {
        'name': patch['name'].strip(),
        'description': patch.get('description', '').strip(),
        'website': patch.get('website', '').strip(),
        'location': patch.get('location', '').strip(),
    }
    unset = {}

    # Special transformation for IP ranges
    iprs = patch.get('ip_ranges')
    if iprs:
        ipr_docs = []
        for r in iprs:
            try:
                doc = ip_ranges.doc(r, min_prefixlen6=48, min_prefixlen4=8)
            except ValueError as ex:
                raise wz_exceptions.UnprocessableEntity(f'Invalid IP range {r!r}: {ex}')
            ipr_docs.append(doc)
        update['ip_ranges'] = ipr_docs
    else:
        unset['ip_ranges'] = True

    refresh_user_roles = False
    if user.has_cap('admin'):
        if 'seat_count' in patch:
            update['seat_count'] = int(patch['seat_count'])
        if 'org_roles' in patch:
            org_roles = [stripped for stripped in
                         (role.strip() for role in patch['org_roles'])
                         if stripped]
            if not all(role.startswith('org-') for role in org_roles):
                raise wz_exceptions.UnprocessableEntity(
                    'Invalid role given, all roles must start with "org-"')
            update['org_roles'] = org_roles
            refresh_user_roles = True

    self.log.info('User %s edits Organization %s: %s', current_user_id, org_id, update)

    validator = current_app.validator_for_resource('organizations')
    if not validator.validate_update(update, org_id):
        resp = jsonify({
            '_errors': validator.errors,
            '_message': ', '.join(f'{field}: {error}'
                                  for field, error in validator.errors.items()),
        })
        resp.status_code = 422
        return resp

    # Figure out what to set and what to unset
    for_mongo = {'$set': update}
    if unset:
        for_mongo['$unset'] = unset

    organizations_coll = current_app.db('organizations')
    result: UpdateResult = organizations_coll.update_one({'_id': org_id}, for_mongo)

    if result.matched_count != 1:
        self.log.warning('User %s edits Organization %s but update matched %i items',
                         current_user_id, org_id, result.matched_count)
        raise wz_exceptions.BadRequest()

    if refresh_user_roles:
        self.log.info('Organization roles set for org %s, refreshing users', org_id)
        current_app.org_manager.refresh_all_user_roles(org_id)

    return '', 204
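# Hypothetical client-side sketch of calling the handler above. The endpoint
# URL, the session-based auth, and the 'op' field (assumed from Pillar's
# convention of dispatching PATCH operations to patch_<op> methods) are all
# assumptions; the payload keys and the "omitting an optional field clears it"
# behaviour come from the docstring.
import requests


def edit_organization(session: requests.Session, base_url: str, org_id: str):
    payload = {
        'op': 'edit-from-web',  # assumed patch-dispatch field
        'name': 'Example Org',  # required
        'description': 'An example organization',
        'website': 'https://example.com/',
        'location': 'Amsterdam',
        'ip_ranges': ['192.0.2.0/24', '2001:db8::/48'],
        # Leaving out 'ip_ranges' entirely would clear the field ($unset).
    }
    resp = session.patch(f'{base_url}/api/organizations/{org_id}', json=payload)
    # 204 No Content on success; 422 with an '_errors' body on validation failure.
    resp.raise_for_status()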
def insert_or_fetch_user(wh_payload: dict) -> typing.Optional[dict]:
    """Fetch the user from the DB or create it.

    Only creates it if the webhook payload indicates they could actually
    use Blender Cloud (i.e. demo or subscriber). This prevents us from
    creating Cloud accounts for Blender Network users.

    :returns the user document, or None when not created.
    """
    users_coll = current_app.db('users')
    my_log = log.getChild('insert_or_fetch_user')
    bid_str = str(wh_payload['id'])
    email = wh_payload['email']

    # Find the user by their Blender ID, or any of their email addresses.
    # We use one query to find all matching users. This is done as a
    # consistency check; if more than one user is returned, we know the
    # database is inconsistent with Blender ID and can emit a warning
    # about this.
    query = {'$or': [
        {'auth.provider': 'blender-id', 'auth.user_id': bid_str},
        {'email': {'$in': [wh_payload['old_email'], email]}},
    ]}
    db_users = list(users_coll.find(query))
    user_count = len(db_users)
    if user_count > 1:
        # Now we have to pay the price for finding users in one query; we
        # have to prioritise them and return the one we think is most reliable.
        calc_score = functools.partial(score, wh_payload)
        best_user = max(db_users, key=calc_score)

        my_log.error('%d users found for query %s, picking user %s (%s)',
                     user_count, query, best_user['_id'], best_user['email'])
        return best_user
    if user_count:
        db_user = db_users[0]
        my_log.debug('found user %s', db_user['email'])
        return db_user

    # Pretend to create the user, so that we can inspect the resulting
    # capabilities. This is more future-proof than looking at the list
    # of roles in the webhook payload.
    username = make_unique_username(email)
    user_doc = create_new_user_document(email, bid_str, username,
                                        provider='blender-id',
                                        full_name=wh_payload['full_name'])

    # Figure out the user's eventual roles. These aren't stored in the
    # document yet, because that's handled by the badger service.
    eventual_roles = [subscription.ROLES_BID_TO_PILLAR[r]
                      for r in wh_payload.get('roles', [])
                      if r in subscription.ROLES_BID_TO_PILLAR]
    user_ob = UserClass.construct('', user_doc)
    user_ob.roles = eventual_roles
    user_ob.collect_capabilities()
    create = (user_ob.has_cap('subscriber') or
              user_ob.has_cap('can-renew-subscription') or
              current_app.org_manager.user_is_unknown_member(email))

    if not create:
        my_log.info('Received update for unknown user %r without Cloud access (caps=%s)',
                    wh_payload['old_email'], user_ob.capabilities)
        return None

    # Actually create the user in the database.
    r, _, _, status = current_app.post_internal('users', user_doc)
    if status != 201:
        my_log.error('unable to create user %s: %r %r', email, status, r)
        raise wz_exceptions.InternalServerError('unable to create user')

    user_doc.update(r)
    my_log.info('created user %r = %s to allow immediate Cloud access',
                email, user_doc['_id'])
    return user_doc
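# Self-contained illustration of the role-mapping step above: only Blender ID
# roles with a Pillar equivalent survive. The mapping values here are invented
# for the example; the real mapping lives in subscription.ROLES_BID_TO_PILLAR.
ROLES_BID_TO_PILLAR = {'cloud_demo': 'demo', 'cloud_subscriber': 'subscriber'}  # example values

wh_roles = ['cloud_subscriber', 'network_member']  # 'network_member' has no mapping
eventual_roles = [ROLES_BID_TO_PILLAR[r] for r in wh_roles if r in ROLES_BID_TO_PILLAR]
assert eventual_roles == ['subscriber']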
def user_modified():
    """Update the local user based on the info from Blender ID.

    If the payload indicates the user has access to Blender Cloud (or at
    least a renewable subscription), create the user if not already in our
    DB.

    The payload we expect is a dictionary like:
    {'id': 12345,  # the user's ID in Blender ID
     'old_email': '*****@*****.**',
     'full_name': 'Harry',
     'email': 'new@example.com',
     'avatar_changed': True,
     'roles': ['role1', 'role2', …]}
    """
    my_log = log.getChild('user_modified')
    my_log.debug('Received request from %s', request.remote_addr)

    hmac_secret = current_app.config['BLENDER_ID_WEBHOOK_USER_CHANGED_SECRET']
    payload = webhook_payload(hmac_secret)

    my_log.info('payload: %s', payload)

    # Update the user
    db_user = insert_or_fetch_user(payload)
    if not db_user:
        my_log.info('Received update for unknown user %r', payload['old_email'])
        return '', 204

    # Use direct database updates to change the email and full name.
    # Also updates the db_user dict so that local_user below will have
    # the updated information.
    updates = {}
    if db_user['email'] != payload['email']:
        my_log.info('User changed email from %s to %s', payload['old_email'], payload['email'])
        updates['email'] = payload['email']
        db_user['email'] = payload['email']

    if db_user['full_name'] != payload['full_name']:
        my_log.info('User changed full name from %r to %r',
                    db_user['full_name'], payload['full_name'])
        if payload['full_name']:
            updates['full_name'] = payload['full_name']
        else:
            # Fall back to the username when the full name was erased.
            updates['full_name'] = db_user['username']
        db_user['full_name'] = updates['full_name']

    if payload.get('avatar_changed'):
        import pillar.celery.avatar
        my_log.info('User %s changed avatar, scheduling download', db_user['_id'])
        pillar.celery.avatar.sync_avatar_for_user.delay(str(db_user['_id']))

    if updates:
        users_coll = current_app.db('users')
        update_res = users_coll.update_one({'_id': db_user['_id']}, {'$set': updates})
        if update_res.matched_count != 1:
            my_log.error('Unable to find user %s to update, even though '
                         'we found them by email address %s',
                         db_user['_id'], payload['old_email'])

    # Defer to Pillar to do the role updates.
    local_user = UserClass.construct('', db_user)
    subscription.do_update_subscription(local_user, payload)

    return '', 204
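# A minimal sketch of how a webhook sender might sign its payload, assuming
# webhook_payload() verifies an HMAC-SHA256 hex digest of the raw request body.
# The header name and digest algorithm are assumptions for illustration, not
# taken from this code.
import hashlib
import hmac
import json

import requests


def send_user_modified_webhook(url: str, secret: str, payload: dict):
    body = json.dumps(payload).encode('utf-8')
    mac = hmac.new(secret.encode('utf-8'), body, hashlib.sha256)
    resp = requests.post(
        url,
        data=body,
        headers={
            'Content-Type': 'application/json',
            'X-Webhook-HMAC': mac.hexdigest(),  # assumed header name
        },
    )
    resp.raise_for_status()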
def sync_avatar(user_id: ObjectId) -> str:
    """Fetch the user's avatar from Blender ID and save to storage.

    Errors are logged but do not raise an exception.

    :return: the link to the avatar, or '' if it was not processed.
    """
    users_coll = current_app.db('users')
    db_user = users_coll.find_one({'_id': user_id})
    old_avatar_info = db_user.get('avatar', {})
    if isinstance(old_avatar_info, ObjectId):
        old_avatar_info = {'file': old_avatar_info}

    home_proj = home_project.get_home_project(user_id)
    if not home_proj:
        log.error('Home project of user %s does not exist, unable to store avatar', user_id)
        return ''

    bid_userid = blender_id.get_user_blenderid(db_user)
    if not bid_userid:
        log.error('User %s has no Blender ID user-id, unable to fetch avatar', user_id)
        return ''

    avatar_url = blender_id.avatar_url(bid_userid)
    bid_session = blender_id.Session()

    # Avoid re-downloading the same avatar.
    request_headers = {}
    if avatar_url == old_avatar_info.get('last_downloaded_url') and \
            old_avatar_info.get('last_modified'):
        request_headers['If-Modified-Since'] = old_avatar_info.get('last_modified')

    log.info('Downloading avatar for user %s from %s', user_id, avatar_url)
    resp = bid_session.get(avatar_url, headers=request_headers, allow_redirects=True)
    if resp.status_code == 304:
        # File was not modified, we can keep the old file.
        log.debug('Avatar for user %s was not modified on Blender ID, not re-downloading',
                  user_id)
        return _get_file_link(old_avatar_info['file'])

    resp.raise_for_status()

    mime_type = resp.headers['Content-Type']
    file_extension = _extension_for_mime(mime_type)
    if not file_extension:
        log.error('No file extension known for mime type %s, unable to handle avatar of user %s',
                  mime_type, user_id)
        return ''

    filename = f'avatar-{user_id}{file_extension}'
    fake_local_file = io.BytesIO(resp.content)
    fake_local_file.name = filename

    # Act as if this file was just uploaded by the user, so we can reuse
    # existing Pillar file-handling code.
    log.debug("Uploading avatar for user %s to storage", user_id)
    uploaded_file = FileStorage(
        stream=fake_local_file,
        filename=filename,
        headers=resp.headers,
        content_type=mime_type,
        content_length=resp.headers['Content-Length'],
    )

    with pillar.auth.temporary_user(db_user):
        upload_data = pillar.api.file_storage.upload_and_process(
            fake_local_file,
            uploaded_file,
            str(home_proj['_id']),
            # Disallow image processing, as it's a tiny file anyway and
            # we'll just serve the original.
            may_process_file=False,
        )

    file_id = ObjectId(upload_data['file_id'])

    avatar_info = {
        'file': file_id,
        'last_downloaded_url': resp.url,
        'last_modified': resp.headers.get('Last-Modified'),
    }

    # Update the user to store the reference to their avatar.
    old_avatar_file_id = old_avatar_info.get('file')
    update_result = users_coll.update_one({'_id': user_id},
                                          {'$set': {'avatar': avatar_info}})
    if update_result.matched_count == 1:
        log.debug('Updated avatar for user ID %s to file %s', user_id, file_id)
    else:
        log.warning('Matched %d users while setting avatar for user ID %s to file %s',
                    update_result.matched_count, user_id, file_id)

    if old_avatar_file_id:
        current_app.delete_internal('files', _id=old_avatar_file_id)

    return _get_file_link(file_id)
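# A small, self-contained illustration of the conditional download used above:
# send If-Modified-Since with the previously stored Last-Modified value and
# treat HTTP 304 as "keep the cached copy". The URL is a placeholder.
import requests


def fetch_if_modified(url: str, last_modified: str = None) -> bytes:
    headers = {}
    if last_modified:
        headers['If-Modified-Since'] = last_modified
    resp = requests.get(url, headers=headers, allow_redirects=True)
    if resp.status_code == 304:
        return b''  # unchanged; the caller keeps the old file
    resp.raise_for_status()
    # Store resp.headers.get('Last-Modified') for the next call.
    return resp.content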
def owning_users(self, owner_gid: bson.ObjectId) -> typing.List[dict]:
    assert isinstance(owner_gid, bson.ObjectId)

    users_coll = current_app.db('users')
    users = users_coll.find({'groups': owner_gid})
    return list(users)
def get_random_featured_nodes() -> typing.List[dict]:
    """Returns a list of project/node combinations for featured nodes.

    A random subset of 6 featured nodes from all public projects is returned.
    Assumes that the user actually has access to the public projects' nodes.

    The dict is a node, with a 'project' key that contains a projected
    project.
    """
    proj_coll = current_app.db('projects')
    featured_nodes = proj_coll.aggregate([
        {'$match': {'is_private': False}},
        {'$project': {'nodes_featured': True,
                      'url': True,
                      'name': True,
                      'summary': True,
                      'picture_square': True}},
        {'$unwind': {'path': '$nodes_featured'}},
        {'$sample': {'size': 6}},
        {'$lookup': {'from': 'nodes',
                     'localField': 'nodes_featured',
                     'foreignField': '_id',
                     'as': 'node'}},
        {'$unwind': {'path': '$node'}},
        {'$match': {'node._deleted': {'$ne': True}}},
        {'$project': {'url': True,
                      'name': True,
                      'summary': True,
                      'picture_square': True,
                      'node._id': True,
                      'node.name': True,
                      'node.permissions': True,
                      'node.picture': True,
                      'node.properties.content_type': True,
                      'node.properties.duration_seconds': True,
                      'node.properties.url': True,
                      'node._created': True,
                      }},
    ])

    featured_node_documents = []
    api = system_util.pillar_api()
    for node_info in featured_nodes:
        # Turn the project-with-node doc into a node-with-project doc.
        node_document = node_info.pop('node')
        node_document['project'] = node_info
        node_document['_id'] = str(node_document['_id'])

        node = Node(node_document)
        node.picture = get_file(node.picture, api=api)
        node.project.picture_square = get_file(node.project.picture_square, api=api)
        featured_node_documents.append(node)

    return featured_node_documents
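# A trimmed sketch of the aggregation above, runnable against a plain pymongo
# client (connection details are placeholders). It shows the core trick:
# unwind the projects' featured-node lists, $sample a few, then $lookup the
# actual node documents and drop deleted ones.
from pymongo import MongoClient


def sample_featured(db, size: int = 6):
    return list(db.projects.aggregate([
        {'$match': {'is_private': False}},
        {'$unwind': {'path': '$nodes_featured'}},
        {'$sample': {'size': size}},
        {'$lookup': {'from': 'nodes',
                     'localField': 'nodes_featured',
                     'foreignField': '_id',
                     'as': 'node'}},
        {'$unwind': {'path': '$node'}},
        {'$match': {'node._deleted': {'$ne': True}}},
    ]))


# db = MongoClient('mongodb://localhost:27017')['cloud']  # placeholder
# for doc in sample_featured(db):
#     print(doc['node']['name'])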
def refresh_roles(self, user_id: bson.ObjectId):
    """Refreshes the user's roles to own roles + organizations' roles."""
    assert isinstance(user_id, bson.ObjectId)

    from pillar.api.service import do_badger

    self._log.info('Refreshing roles for user %s', user_id)

    org_coll = current_app.db('organizations')

    # Aggregate all org-given roles for this user.
    query = org_coll.aggregate([
        {'$match': {'members': user_id}},
        {'$project': {'org_roles': 1}},
        {'$unwind': {'path': '$org_roles'}},
        {'$group': {
            '_id': None,
            'org_roles': {'$addToSet': '$org_roles'},
        }}])

    # If the user has no organizations at all, the query will have no results.
    try:
        org_roles_doc = query.next()
    except StopIteration:
        org_roles = set()
    else:
        org_roles = set(org_roles_doc['org_roles'])

    users_coll = current_app.db('users')
    user_doc = users_coll.find_one(user_id, projection={'roles': 1})
    if not user_doc:
        self._log.warning('Trying to refresh roles of non-existing user %s, ignoring', user_id)
        return

    all_user_roles = set(user_doc.get('roles') or [])
    existing_org_roles = {role for role in all_user_roles
                          if role.startswith('org-')}

    grant_roles = org_roles - all_user_roles
    revoke_roles = existing_org_roles - org_roles

    if grant_roles:
        do_badger('grant', roles=grant_roles, user_id=user_id)
    if revoke_roles:
        do_badger('revoke', roles=revoke_roles, user_id=user_id)
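# The grant/revoke logic above is plain set arithmetic; this self-contained
# example shows it with literal role names (chosen for illustration only).
org_roles = {'org-subscriber', 'org-flamenco'}                  # roles the orgs grant
all_user_roles = {'subscriber', 'org-subscriber', 'org-admin'}  # currently on the user
existing_org_roles = {r for r in all_user_roles if r.startswith('org-')}

grant_roles = org_roles - all_user_roles       # org roles the user is missing
revoke_roles = existing_org_roles - org_roles  # org roles no org grants anymore

assert grant_roles == {'org-flamenco'}
assert revoke_roles == {'org-admin'}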
def remove_user(self, org_id: bson.ObjectId, *,
                user_id: bson.ObjectId = None,
                email: str = None) -> dict:
    """Removes a user from the organization.

    The user can be identified by either user ID or email.

    Returns the new organization document.
    """
    users_coll = current_app.db('users')

    assert user_id or email

    # Collect the email address if not given. This ensures the removal
    # if the email was accidentally in the unknown_members list.
    if email is None:
        user_doc = users_coll.find_one(user_id, projection={'email': 1})
        if user_doc is not None:
            email = user_doc['email']

    # See if we know this user.
    if user_id is None:
        user_doc = users_coll.find_one({'email': email}, projection={'_id': 1})
        if user_doc is not None:
            user_id = user_doc['_id']

    if user_id and not users_coll.count({'_id': user_id}):
        raise wz_exceptions.UnprocessableEntity('User does not exist')

    self._log.info('Removing user %s / %s from organization %s', user_id, email, org_id)

    org_doc = self._get_org(org_id)

    # Compute the new members.
    if user_id:
        members = set(org_doc.get('members') or []) - {user_id}
        org_doc['members'] = list(members)

    if email:
        unknown_members = set(org_doc.get('unknown_members') or []) - {email}
        org_doc['unknown_members'] = list(unknown_members)

    r, _, _, status = current_app.put_internal('organizations',
                                               remove_private_keys(org_doc),
                                               _id=org_id)
    if status != 200:
        self._log.error('Error updating organization; status should be 200, not %i: %s',
                        status, r)
        raise ValueError(f'Unable to update organization, status code {status}')
    org_doc.update(r)

    # Update the roles for the affected member.
    if user_id:
        self.refresh_roles(user_id)

    return org_doc
def user_is_unknown_member(self, member_email: str) -> bool:
    """Return True iff the email is an unknown member of some org."""
    org_coll = current_app.db('organizations')
    org_count = org_coll.count({'unknown_members': member_email})
    return bool(org_count)
def refresh_roles(self, user_id: bson.ObjectId) -> typing.Set[str]:
    """Refreshes the user's roles to own roles + organizations' roles.

    :returns: the applied set of roles.
    """
    assert isinstance(user_id, bson.ObjectId)

    from pillar.api.service import do_badger

    self._log.info('Refreshing roles for user %s', user_id)

    org_coll = current_app.db('organizations')
    tokens_coll = current_app.db('tokens')

    def aggr_roles(coll, match: dict) -> typing.Set[str]:
        query = coll.aggregate([
            {'$match': match},
            {'$project': {'org_roles': 1}},
            {'$unwind': {'path': '$org_roles'}},
            {'$group': {
                '_id': None,
                'org_roles': {'$addToSet': '$org_roles'},
            }}])

        # If the user has no organizations/tokens at all, the query will
        # have no results.
        try:
            org_roles_doc = query.next()
        except StopIteration:
            return set()
        return set(org_roles_doc['org_roles'])

    # Join all organization-given roles and roles from the tokens collection.
    org_roles = aggr_roles(org_coll, {'members': user_id})
    self._log.debug('Organization-given roles for user %s: %s', user_id, org_roles)
    token_roles = aggr_roles(tokens_coll, {
        'user': user_id,
        'expire_time': {'$gt': utcnow()},
    })
    self._log.debug('Token-given roles for user %s: %s', user_id, token_roles)
    org_roles.update(token_roles)

    users_coll = current_app.db('users')
    user_doc = users_coll.find_one(user_id, projection={'roles': 1})
    if not user_doc:
        self._log.warning('Trying to refresh roles of non-existing user %s, ignoring', user_id)
        return set()

    all_user_roles = set(user_doc.get('roles') or [])
    existing_org_roles = {role for role in all_user_roles
                          if role.startswith('org-')}

    grant_roles = org_roles - all_user_roles
    revoke_roles = existing_org_roles - org_roles

    if grant_roles:
        do_badger('grant', roles=grant_roles, user_id=user_id)
    if revoke_roles:
        do_badger('revoke', roles=revoke_roles, user_id=user_id)

    return all_user_roles.union(grant_roles) - revoke_roles
def view_job(project, flamenco_props, job_id):
    if not request.is_xhr:
        return for_project(project, job_id=job_id)

    # Job list is public, job details are not.
    if not current_user.has_cap('flamenco-view'):
        raise wz_exceptions.Forbidden()

    from .sdk import Job
    from ..managers.sdk import Manager

    api = pillar_api()
    job = Job.find(job_id, api=api)

    try:
        manager = Manager.find(job.manager, api=api)
    except pillarsdk.ForbiddenAccess:
        # It's very possible that the user doesn't have access to this Manager.
        manager = None
    except pillarsdk.ResourceNotFound:
        log.warning('Flamenco job %s has a non-existent manager %s', job_id, job.manager)
        manager = None

    users_coll = current_app.db('users')
    user = users_coll.find_one(bson.ObjectId(job.user),
                               projection={'username': 1, 'full_name': 1})
    if user:
        username = user.get('username', '')
        full_name = user.get('full_name', '')
        user_name = f'{full_name} (@{username})'.strip() or '-unknown-'
    else:
        user_name = '-unknown-'

    from . import (CANCELABLE_JOB_STATES, REQUEABLE_JOB_STATES, RECREATABLE_JOB_STATES,
                   ARCHIVE_JOB_STATES, ARCHIVEABLE_JOB_STATES,
                   FAILED_TASKS_REQUEABLE_JOB_STATES)

    auth = current_flamenco.auth
    write_access = auth.current_user_may(auth.Actions.USE, bson.ObjectId(project['_id']))

    status = job['status']
    is_archived = status in ARCHIVE_JOB_STATES
    archive_available = is_archived and job.archive_blob_name

    # Sort job settings so we can iterate over them in a deterministic way.
    job_settings = collections.OrderedDict((key, job.settings[key])
                                           for key in sorted(job.settings.to_dict().keys()))

    change_prio_states = RECREATABLE_JOB_STATES | REQUEABLE_JOB_STATES | CANCELABLE_JOB_STATES

    return render_template(
        'flamenco/jobs/view_job_embed.html',
        job=job,
        user_name=user_name,
        manager=manager.to_dict() if manager else None,
        project=project,
        flamenco_props=flamenco_props.to_dict(),
        flamenco_context=request.args.get('context'),
        can_cancel_job=write_access and status in CANCELABLE_JOB_STATES,
        can_requeue_job=write_access and status in REQUEABLE_JOB_STATES,
        can_recreate_job=write_access and status in RECREATABLE_JOB_STATES,
        can_archive_job=write_access and status in ARCHIVEABLE_JOB_STATES,
        # TODO(Sybren): check that there are actually failed tasks before setting to True:
        can_requeue_failed_tasks=write_access and status in FAILED_TASKS_REQUEABLE_JOB_STATES,
        can_change_prio=write_access and status in change_prio_states,
        can_edit_rna_overrides=write_access and job['job_type'] in blender_render.job_types(),
        is_archived=is_archived,
        write_access=write_access,
        archive_available=archive_available,
        job_settings=job_settings,
        job_status_help=HELP_FOR_STATUS.get(status, ''),
    )