def get_metadata_files(draft):
    data = draft.registration_metadata
    for q, question in get_file_questions('prereg-prize.json'):
        if not isinstance(data[q]['value'], dict):
            for i, file_info in enumerate(data[q]['extra']):
                provider = file_info['data']['provider']
                if provider != 'osfstorage':
                    raise Http404(
                        'File does not exist in OSFStorage ({}: {})'.format(
                            q, question))
                file_guid = file_info.get('fileId')
                if not file_guid:
                    node = Node.load(file_info.get('nodeId'))
                    path = file_info['data'].get('path')
                    item = FileNode.resolve_class(provider, FileNode.FILE).get_or_create(node, path)
                    file_guid = item.get_guid(create=True)._id
                    data[q]['extra'][i]['fileId'] = file_guid
                    draft.update_metadata(data)
                    draft.save()
                else:
                    guid = Guid.load(file_guid)
                    item = guid.referent
                if item is None:
                    raise Http404(
                        'File with guid "{}" in "{}" does not exist'.format(
                            file_guid, question))
                yield item
            continue
        for i, file_info in enumerate(data[q]['value']['uploader']['extra']):
            provider = file_info['data']['provider']
            if provider != 'osfstorage':
                raise Http404(
                    'File does not exist in OSFStorage ({}: {})'.format(
                        q, question))
            file_guid = file_info.get('fileId')
            if not file_guid:
                node = Node.load(file_info.get('nodeId'))
                path = file_info['data'].get('path')
                item = FileNode.resolve_class(provider, FileNode.FILE).get_or_create(node, path)
                file_guid = item.get_guid(create=True)._id
                data[q]['value']['uploader']['extra'][i]['fileId'] = file_guid
                draft.update_metadata(data)
                draft.save()
            else:
                guid = Guid.load(file_guid)
                item = guid.referent
            if item is None:
                raise Http404(
                    'File with guid "{}" in "{}" does not exist'.format(
                        file_guid, question))
            yield item

def get_target(self, node_id, target_id):
    node = Node.load(target_id)
    if node and node_id != target_id:
        raise ValueError('Cannot post comment to another node.')
    elif target_id == node_id:
        return Node.load(node_id)
    else:
        comment = Comment.load(target_id)
        if comment:
            return comment
        else:
            raise ValueError

def get_paginated_response(self, data): """Add number of unread comments to links.meta when viewing list of comments filtered by a target node, file or wiki page.""" response = super(CommentPagination, self).get_paginated_response(data) response_dict = response.data kwargs = self.request.parser_context['kwargs'].copy() if self.request.query_params.get('related_counts', False): target_id = self.request.query_params.get('filter[target]', None) node_id = kwargs.get('node_id', None) node = Node.load(node_id) user = self.request.user if target_id and not user.is_anonymous() and node.is_contributor(user): root_target = Guid.load(target_id) if root_target: page = getattr(root_target.referent, 'root_target_page', None) if page: if not len(data): unread = 0 else: unread = Comment.find_n_unread(user=user, node=node, page=page, root_id=target_id) if self.request.version < '2.1': response_dict['links']['meta']['unread'] = unread else: response_dict['meta']['unread'] = unread return Response(response_dict)
def menbib_oauth_finish(**kwargs):
    user = get_current_user()
    if not user:
        raise HTTPError(http.FORBIDDEN)
    node = Node.load(session.data.get('menbib_auth_nid'))
    result = finish_auth()
    user.add_addon('menbib')
    user.save()
    user_settings = user.get_addon('menbib')
    user_settings.owner = user
    user_settings.access_token = result.access_token
    user_settings.refresh_token = result.refresh_token
    user_settings.token_type = result.token_type
    user_settings.expires_in = result.expires_in
    user_settings.save()
    flash('Successfully authorized Mendeley', 'success')
    if node:
        del session.data['menbib_auth_nid']
        if node.has_addon('menbib'):
            node_addon = node.get_addon('menbib')
            node_addon.set_user_auth(user_settings)
            node_addon.save()
        return redirect(node.web_url_for('node_setting'))
    return redirect(web_url_for('user_addons'))

def on_delete(self):
    super(AddonOAuthUserSettingsBase, self).on_delete()
    nodes = [Node.load(node_id) for node_id in self.oauth_grants.keys()]
    for node in nodes:
        node_addon = node.get_addon(self.oauth_provider.short_name)
        if node_addon and node_addon.user_settings == self:
            node_addon.clear_auth()

def update(self, instance, validated_data):
    view_only_link = instance['self']
    nodes = instance['data']
    user = self.context['request'].user
    new_nodes = validated_data['data']

    add, remove = self.get_nodes_to_add_remove(
        nodes=nodes,
        new_nodes=new_nodes
    )

    for node in remove:
        if not node.has_permission(user, 'admin'):
            raise PermissionDenied
        view_only_link.nodes.remove(node)
    view_only_link.save()

    nodes = [Node.load(node) for node in view_only_link.nodes]
    eligible_nodes = self.get_eligible_nodes(nodes)

    for node in add:
        if not node.has_permission(user, 'admin'):
            raise PermissionDenied
        if node not in eligible_nodes:
            raise NonDescendantNodeError(node_id=node._id)
        view_only_link.nodes.append(node)
    view_only_link.save()

    return self.make_instance_obj(view_only_link)

def dropbox_oauth_finish(auth, **kwargs):
    """View called when the Oauth flow is completed. Adds a new DropboxUserSettings
    record to the user and saves the user's access token and account info.
    """
    if not auth.logged_in:
        raise HTTPError(http.FORBIDDEN)
    user = auth.user
    node = Node.load(session.data.get('dropbox_auth_nid'))
    result = finish_auth(node)
    # If result is a redirect response, follow the redirect
    if isinstance(result, BaseResponse):
        return result
    # Make sure user has dropbox enabled
    user.add_addon('dropbox')
    user.save()
    user_settings = user.get_addon('dropbox')
    user_settings.owner = user
    user_settings.access_token = result.access_token
    user_settings.dropbox_id = result.dropbox_id
    client = get_client_from_user_settings(user_settings)
    user_settings.dropbox_info = client.account_info()
    user_settings.save()
    if node:
        del session.data['dropbox_auth_nid']
        # Automatically use newly-created auth
        if node.has_addon('dropbox'):
            node_addon = node.get_addon('dropbox')
            node_addon.set_user_auth(user_settings)
            node_addon.save()
        return redirect(node.web_url_for('node_setting'))
    return redirect(web_url_for('user_addons'))

def update_file_guid_referent(self, node, event_type, payload, user=None):
    if event_type == 'addon_file_moved' or event_type == 'addon_file_renamed':
        source = payload['source']
        destination = payload['destination']
        source_node = Node.load(source['node']['_id'])
        destination_node = node
        file_guids = FileNode.resolve_class(source['provider'], FileNode.ANY).get_file_guids(
            materialized_path=source['materialized'] if source['provider'] != 'osfstorage' else source['path'],
            provider=source['provider'],
            node=source_node)

        if event_type == 'addon_file_renamed' and source['provider'] in settings.ADDONS_BASED_ON_IDS:
            return
        if event_type == 'addon_file_moved' and (source['provider'] == destination['provider'] and
                                                 source['provider'] in settings.ADDONS_BASED_ON_IDS) and source_node == destination_node:
            return

        for guid in file_guids:
            obj = Guid.load(guid)
            if source_node != destination_node and Comment.find(Q('root_target', 'eq', guid)).count() != 0:
                update_comment_node(guid, source_node, destination_node)

            if source['provider'] != destination['provider'] or source['provider'] != 'osfstorage':
                old_file = FileNode.load(obj.referent._id)
                obj.referent = create_new_file(obj, source, destination, destination_node)
                obj.save()
                if old_file and not TrashedFileNode.load(old_file._id):
                    old_file.delete()

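# Illustration only: update_comment_node is defined elsewhere in the codebase and is
# assumed to re-point comments rooted at the moved file's guid to the destination node.
# This is a minimal sketch of that assumed behaviour, not the actual implementation.
def update_comment_node(root_target_id, source_node, destination_node):
    # move every comment thread rooted at this guid onto the destination node
    Comment.update(Q('root_target', 'eq', root_target_id), data={'node': destination_node})
    source_node.save()
    destination_node.save()
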
def get_node_title(self, obj):
    user = self.context['request'].user
    node_title = obj['node']['title']
    node = Node.load(obj['node']['_id'])
    if node.has_permission(user, osf_permissions.READ):
        return node_title
    return 'Private Component'

def fix_wiki_titles(wiki_pages):
    for i, wiki in enumerate(wiki_pages):
        old_name = wiki["page_name"]
        new_name = wiki["page_name"].replace("/", "")

        # update wiki page name
        db.nodewikipage.update({"_id": wiki["_id"]}, {"$set": {"page_name": new_name}})
        logger.info("Updated wiki {} title to {}".format(wiki["_id"], new_name))

        node = Node.load(wiki["node"])
        if not node:
            logger.info("Invalid node {} for wiki {}".format(node, wiki["_id"]))
            continue

        # update node wiki page records
        if old_name in node.wiki_pages_versions:
            node.wiki_pages_versions[new_name] = node.wiki_pages_versions[old_name]
            del node.wiki_pages_versions[old_name]

        if old_name in node.wiki_pages_current:
            node.wiki_pages_current[new_name] = node.wiki_pages_current[old_name]
            del node.wiki_pages_current[old_name]

        if old_name in node.wiki_private_uuids:
            node.wiki_private_uuids[new_name] = node.wiki_private_uuids[old_name]
            del node.wiki_private_uuids[old_name]

        node.save()

def archive_success(dst_pk, job_pk):
    """Archiver's final callback. For the time being the use case for this task
    is to rewrite references to files selected in a registration schema (the Prereg
    Challenge being the first to expose this feature). The created references point
    to files on the registered_from Node (needed for previewing schema data), and
    must be re-associated with the corresponding files in the newly created registration.

    :param str dst_pk: primary key of registration Node

    note:: At first glance this task makes redundant calls to utils.get_file_map (which
    returns a generator yielding (<sha256>, <file_metadata>) pairs) on the dst Node. Two
    notes about utils.get_file_map: 1) this function memoizes previous results to reduce
    overhead and 2) this function returns a generator that lazily fetches the file metadata
    of child Nodes (it is possible for a selected file to belong to a child Node) using a
    non-recursive DFS. Combined this allows for a relatively efficient implementation with
    seemingly redundant calls.
    """
    create_app_context()
    dst = Node.load(dst_pk)
    # The filePicker extension added with the Prereg Challenge registration schema
    # allows users to select files in OSFStorage as their response to some schema
    # questions. These files are references to files on the unregistered Node, and
    # consequently we must migrate those file paths after archiver has run. Using
    # sha256 hashes is a convenient way to identify files post-archival.
    for schema in dst.registered_schema:
        if schema.has_files:
            utils.migrate_file_metadata(dst, schema)
    job = ArchiveJob.load(job_pk)
    if not job.sent:
        job.sent = True
        job.save()
        dst.sanction.ask(dst.get_active_contributors_recursive(unique_users=True))

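# Illustration only (not the actual website.archiver.utils implementation): a rough sketch
# of how get_file_map could combine memoization with a non-recursive DFS over child Nodes,
# as the docstring above describes. `_file_map_cache` and `_files_on_node` are hypothetical
# names; `node.nodes` is assumed to list child Nodes. It yields (sha256, file_metadata,
# node_id) triples, matching how find_registration_file consumes the map elsewhere.
_file_map_cache = {}

def get_file_map(node):
    stack = [node]
    while stack:  # iterative DFS, no recursion
        current = stack.pop()
        if current._id not in _file_map_cache:
            # hypothetical helper returning (sha256, file_metadata) pairs for one Node
            _file_map_cache[current._id] = list(_files_on_node(current))
        for sha256, metadata in _file_map_cache[current._id]:
            yield sha256, metadata, current._id
        stack.extend(current.nodes)  # lazily descend into child Nodes
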
def get_nodes_with_oauth_grants(self, external_account):
    # Generator of nodes which have grants for this external account
    return (
        Node.load(node_id)
        for node_id, grants in self.oauth_grants.iteritems()
        if external_account._id in grants.keys()
    )

def on_delete(self): """When the user deactivates the addon, clear auth for connected nodes. """ super(AddonOAuthUserSettingsBase, self).on_delete() nodes = [Node.load(node_id) for node_id in self.oauth_grants.keys()] for node in nodes: node_addon = node.get_addon(self.oauth_provider.short_name) if node_addon and node_addon.user_settings == self: node_addon.clear_auth()
def box_oauth_finish(auth, **kwargs):
    """View called when the Oauth flow is completed. Adds a new BoxUserSettings
    record to the user and saves the user's access token and account info.
    """
    user = auth.user
    node = Node.load(session.data.pop('box_auth_nid', None))

    # Handle request cancellations from Box's API
    if request.args.get('error'):
        flash('Box authorization request cancelled.')
        if node:
            return redirect(node.web_url_for('node_setting'))
        return redirect(web_url_for('user_addons'))

    result = finish_auth()

    # If result is a redirect response, follow the redirect
    if isinstance(result, BaseResponse):
        return result

    client = BoxClient(CredentialsV2(
        result['access_token'],
        result['refresh_token'],
        settings.BOX_KEY,
        settings.BOX_SECRET,
    ))
    about = client.get_user_info()
    oauth_settings = BoxOAuthSettings.load(about['id'])
    if not oauth_settings:
        oauth_settings = BoxOAuthSettings(user_id=about['id'], username=about['name'])
        oauth_settings.save()

    oauth_settings.refresh_token = result['refresh_token']
    oauth_settings.access_token = result['access_token']
    oauth_settings.expires_at = datetime.utcfromtimestamp(time.time() + 3600)

    # Make sure user has box enabled
    user.add_addon('box')
    user.save()

    user_settings = user.get_addon('box')
    user_settings.oauth_settings = oauth_settings
    user_settings.save()

    flash('Successfully authorized Box', 'success')
    if node:
        # Automatically use newly-created auth
        if node.has_addon('box'):
            node_addon = node.get_addon('box')
            node_addon.set_user_auth(user_settings)
            node_addon.save()
        return redirect(node.web_url_for('node_setting'))
    return redirect(web_url_for('user_addons'))

def get_targets(db, addon_class): """Generate affected nodes.""" query = db['node'].find({ '.'.join(('__backrefs', 'addons', addon_class.__name__.lower(), 'owner')): { '$size': 2 } }) return (Node.load(node['_id']) for node in query)
def _rejection_url_context(self, user_id):
    user_approval_state = self.approval_state.get(user_id, {})
    rejection_token = self.approval_state.get(user_id, {}).get("rejection_token")
    if rejection_token:
        from website.project.model import Node
        root_registration = self._get_registration()
        node_id = user_approval_state.get("node_id", root_registration._id)
        registration = Node.load(node_id)
        return {"node_id": registration.registered_from._id, "token": rejection_token}

def update_comment_root_target_file(self, node, event_type, payload, user=None):
    if event_type == 'addon_file_moved':
        source = payload['source']
        destination = payload['destination']
        source_node = Node.load(source['node']['_id'])
        destination_node = node
        if (source.get('provider') == destination.get('provider') == 'osfstorage') and source_node._id != destination_node._id:
            obj = FileNode.load(source.get('path').strip('/'))
            update_folder_contents([obj], source_node, destination_node)

def find_registration_file(value, node):
    orig_sha256 = value['extra']['sha256']
    orig_name = value['extra']['selectedFileName']
    orig_node = value['extra']['nodeId']
    file_map = utils.get_file_map(node)
    for sha256, value, node_id in file_map:
        registered_from_id = Node.load(node_id).registered_from._id
        if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == value['name']:
            return value, node_id
    return None, None

def get_paginated_response(self, data): """ Add number of bibliographic contributors to links.meta""" response = super(NodeContributorPagination, self).get_paginated_response(data) response_dict = response.data kwargs = self.request.parser_context["kwargs"].copy() node_id = kwargs.get("node_id", None) node = Node.load(node_id) total_bibliographic = len(node.visible_contributor_ids) response_dict["links"]["meta"]["total_bibliographic"] = total_bibliographic return Response(response_dict)
def googledrive_oauth_finish(auth, **kwargs):
    """View called when the Oauth flow is completed. Adds a new GoogleDriveUserSettings
    record to the user and saves the user's access token and account info.
    """
    user = auth.user
    node = Node.load(session.data.pop('googledrive_auth_nid', None))

    # Handle request cancellations from Google's API
    if request.args.get('error'):
        flash('Google Drive authorization request cancelled.')
        if node:
            return redirect(node.web_url_for('node_setting'))
        return redirect(web_url_for('user_addons'))

    user.add_addon('googledrive')
    user.save()

    code = request.args.get('code')
    user_settings = user.get_addon('googledrive')

    state = session.data.pop('googledrive_auth_state')
    if state != request.args.get('state'):
        raise HTTPError(http.BAD_REQUEST)
    if code is None:
        raise HTTPError(http.BAD_REQUEST)

    auth_client = GoogleAuthClient()
    token = auth_client.finish(code)
    info = auth_client.userinfo(token['access_token'])

    # Attempt to attach an existing oauth settings model
    oauth_settings = GoogleDriveOAuthSettings.load(info['sub'])
    # Create a new oauth settings model
    if not oauth_settings:
        oauth_settings = GoogleDriveOAuthSettings()
        oauth_settings.user_id = info['sub']
        oauth_settings.save()

    user_settings.oauth_settings = oauth_settings
    user_settings.username = info['name']
    user_settings.access_token = token['access_token']
    user_settings.refresh_token = token['refresh_token']
    user_settings.expires_at = datetime.utcfromtimestamp(token['expires_at'])
    user_settings.save()

    flash('Successfully authorized Google Drive', 'success')
    if node:
        if node.has_addon('googledrive'):
            node_addon = node.get_addon('googledrive')
            node_addon.set_user_auth(user_settings)
            node_addon.save()
        return redirect(node.web_url_for('node_setting'))
    return redirect(web_url_for('user_addons'))

def get_targets(db, addon_class): """Generate affected nodes.""" query = db['node'].find({ '.'.join( ('__backrefs', 'addons', addon_class.__name__.lower(), 'owner' ) ): {'$size': 2} }) return (Node.load(node['_id']) for node in query)
def get_affected_nodes(db, addon_class):
    """Generate affected nodes."""
    query = db['node'].find({
        '.'.join((
            '__backrefs',
            'addons',
            addon_class.__name__.lower(),
            'owner',
            '0'
        )): {'$exists': False}
    })
    return (Node.load(node['_id']) for node in query)

def clone_wiki(self, node_id):
    """Clone a node wiki page.

    :param node_id: The id of the Node for the cloned wiki page
    :return: The cloned wiki page
    """
    node = Node.load(node_id)
    if not node:
        raise ValueError('Invalid node')
    clone = self.clone()
    clone.node = node
    clone.user = self.user
    clone.save()
    return clone

def get_paginated_response(self, data): """ Add number of bibliographic contributors to links.meta""" response = super(NodeContributorPagination, self).get_paginated_response(data) response_dict = response.data kwargs = self.request.parser_context['kwargs'].copy() node_id = kwargs.get('node_id', None) node = Node.load(node_id) total_bibliographic = len(node.visible_contributor_ids) if self.request.version < '2.1': response_dict['links']['meta']['total_bibliographic'] = total_bibliographic else: response_dict['meta']['total_bibliographic'] = total_bibliographic return Response(response_dict)
def _rejection_url_context(self, user_id):
    user_approval_state = self.approval_state.get(user_id, {})
    rejection_token = user_approval_state.get('rejection_token')
    if rejection_token:
        from website.project.model import Node
        root_registration = Node.find_one(Q('retraction', 'eq', self))
        node_id = user_approval_state.get('node_id', root_registration._id)
        registration = Node.load(node_id)
        return {
            'node_id': registration.registered_from._id,
            'token': rejection_token,
        }

def get_nodes_to_add_remove(self, nodes, new_nodes):
    diff = relationship_diff(
        current_items={node._id: node for node in nodes},
        new_items={node['_id']: node for node in new_nodes}
    )

    nodes_to_add = []
    for node_id in diff['add']:
        node = Node.load(node_id)
        if not node:
            raise NotFound
        nodes_to_add.append(node)

    return nodes_to_add, diff['remove'].values()

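# Illustration only: relationship_diff is imported from elsewhere in the codebase.
# A minimal sketch of its assumed behaviour, keyed on ids: items present only in
# new_items are additions, items present only in current_items are removals.
# The real helper may differ in details.
def relationship_diff(current_items, new_items):
    return {
        'add': {k: v for k, v in new_items.items() if k not in current_items},
        'remove': {k: v for k, v in current_items.items() if k not in new_items},
    }
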
def migrate_project_contributed(user): count = 0 for node_id in user.node__contributed: node = Node.load(node_id) if node._primary_key in user.unclaimed_records: del user.unclaimed_records[node._primary_key] node.contributors.remove(user._id) node.clear_permission(user) if user._id in node.visible_contributor_ids: node.visible_contributor_ids.remove(user._id) node.save() count += 1 logger.info("Removed user - {} as a contributor from project - {}".format(user._id, node._id)) logger.info("Removed user - {} as a contributor from {} projects".format(user._id, count))
def migrate_project_contributed(user):
    count = 0
    for node_id in user.node__contributed:
        node = Node.load(node_id)
        if node._primary_key in user.unclaimed_records:
            del user.unclaimed_records[node._primary_key]
        node.contributors.remove(user._id)
        node.clear_permission(user)
        if user._id in node.visible_contributor_ids:
            node.visible_contributor_ids.remove(user._id)
        node.save()
        count += 1
        logger.info('Removed user - {} as a contributor from project - {}'.format(user._id, node._id))
    logger.info('Removed user - {} as a contributor from {} projects'.format(user._id, count))

def update_comment_root_target_file(self, node, event_type, payload, user=None):
    if event_type == 'addon_file_moved':
        source = payload['source']
        destination = payload['destination']
        source_node = Node.load(source['node']['_id'])
        destination_node = node
        if (source.get('provider') == destination.get('provider') == 'osfstorage') and source_node._id != destination_node._id:
            old_file = FileNode.load(source.get('path').strip('/'))
            new_file = FileNode.resolve_class(destination.get('provider'), FileNode.FILE).get_or_create(destination_node, destination.get('path'))
            Comment.update(Q('root_target', 'eq', old_file._id), data={'node': destination_node})

            # update node record of commented files
            if old_file._id in source_node.commented_files:
                destination_node.commented_files[new_file._id] = source_node.commented_files[old_file._id]
                del source_node.commented_files[old_file._id]
                source_node.save()
                destination_node.save()

def box_oauth_start(auth, **kwargs):
    user = auth.user
    # Store the node ID on the session in order to get the correct redirect URL
    # upon finishing the flow
    nid = kwargs.get('nid') or kwargs.get('pid')
    node = Node.load(nid)

    if node and not node.is_contributor(user):
        raise HTTPError(http.FORBIDDEN)

    csrf_token = security.random_string(10)
    session.data['box_oauth_state'] = csrf_token
    if nid:
        session.data['box_auth_nid'] = nid

    # Handle if user has already authorized box
    if user.has_addon('box') and user.get_addon('box').has_auth:
        return redirect(web_url_for('user_addons'))
    return redirect(get_auth_flow(csrf_token))

def get_paginated_response(self, data): """Add number of unread comments to links.meta when viewing list of comments filtered by a target node, file or wiki page.""" response = super(CommentPagination, self).get_paginated_response(data) response_dict = response.data kwargs = self.request.parser_context["kwargs"].copy() if self.request.query_params.get("related_counts", False): target_id = self.request.query_params.get("filter[target]", None) node_id = kwargs.get("node_id", None) node = Node.load(node_id) user = self.request.user if target_id and not user.is_anonymous() and node.is_contributor(user): root_target = Guid.load(target_id) page = getattr(root_target.referent, "root_target_page", None) if page: if not len(data): unread = 0 else: unread = Comment.find_n_unread(user=user, node=node, page=page, root_id=target_id) response_dict["links"]["meta"]["unread"] = unread return Response(response_dict)
def get_target(self, node_id, target_id):
    node = Node.load(target_id)
    comment = Comment.load(target_id)
    target_file = StoredFileNode.load(target_id)
    if node:
        if node_id == target_id:
            return node
        else:
            raise ValueError('Cannot post comment to another node.')
    elif comment:
        if comment.node._id == node_id:
            return comment
        else:
            raise ValueError('Cannot post reply to comment on another node.')
    elif target_file:
        if target_file.provider not in osf_settings.ADDONS_COMMENTABLE:
            raise ValueError('Comments are not supported for this file provider.')
        elif target_file.node._id != node_id:
            raise ValueError('Cannot post comment to file on another node.')
        else:
            return target_file
    else:
        raise ValueError('Invalid comment target.')

def archive_success(dst_pk, job_pk):
    """Archiver's final callback. For the time being the use case for this task
    is to rewrite references to files selected in a registration schema (the Prereg
    Challenge being the first to expose this feature). The created references point
    to files on the registered_from Node (needed for previewing schema data), and
    must be re-associated with the corresponding files in the newly created registration.

    :param str dst_pk: primary key of registration Node

    note:: At first glance this task makes redundant calls to utils.get_file_map (which
    returns a generator yielding (<sha256>, <file_metadata>) pairs) on the dst Node. Two
    notes about utils.get_file_map: 1) this function memoizes previous results to reduce
    overhead and 2) this function returns a generator that lazily fetches the file metadata
    of child Nodes (it is possible for a selected file to belong to a child Node) using a
    non-recursive DFS. Combined this allows for a relatively efficient implementation with
    seemingly redundant calls.
    """
    create_app_context()
    dst = Node.load(dst_pk)

    # The filePicker extension added with the Prereg Challenge registration schema
    # allows users to select files in OSFStorage as their response to some schema
    # questions. These files are references to files on the unregistered Node, and
    # consequently we must migrate those file paths after archiver has run. Using
    # sha256 hashes is a convenient way to identify files post-archival.
    prereg_schema = MetaSchema.find_one(
        Q('name', 'eq', 'Prereg Challenge') &
        Q('schema_version', 'eq', 2)
    )
    missing_files = []
    if prereg_schema in dst.registered_schema:
        prereg_metadata = dst.registered_meta[prereg_schema._id]
        updated_metadata = {}
        for key, question in prereg_metadata.items():
            if isinstance(question['value'], dict):
                for subkey, subvalue in question['value'].items():
                    registration_file = None
                    if subvalue.get('extra', {}).get('sha256'):
                        registration_file, node_id = find_registration_file(subvalue, dst)
                        if not registration_file:
                            missing_files.append({
                                'file_name': subvalue['extra']['selectedFileName'],
                                'question_title': find_question(prereg_schema.schema, key)['title']
                            })
                            continue
                        subvalue['extra'].update({
                            'viewUrl': VIEW_FILE_URL_TEMPLATE.format(node_id=node_id, path=registration_file['path'].lstrip('/'))
                        })
                    question['value'][subkey] = subvalue
            else:
                if question.get('extra', {}).get('sha256'):
                    registration_file, node_id = find_registration_file(question, dst)
                    if not registration_file:
                        missing_files.append({
                            'file_name': question['extra']['selectedFileName'],
                            'question_title': find_question(prereg_schema.schema, key)['title']
                        })
                        continue
                    question['extra'].update({
                        'viewUrl': VIEW_FILE_URL_TEMPLATE.format(node_id=node_id, path=registration_file['path'].lstrip('/'))
                    })
            updated_metadata[key] = question

        if missing_files:
            raise ArchivedFileNotFound(
                registration=dst,
                missing_files=missing_files
            )

        prereg_metadata.update(updated_metadata)
        dst.registered_meta[prereg_schema._id] = prereg_metadata
        dst.save()

    job = ArchiveJob.load(job_pk)
    if not job.sent:
        job.sent = True
        job.save()
        dst.sanction.ask(dst.get_active_contributors_recursive(unique_users=True))

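# Illustration only: find_question is used above to recover a question's title from the
# Prereg schema when a selected file cannot be located post-archival. A hedged sketch of
# what such a lookup might look like, assuming the schema dict follows a pages -> questions
# layout with 'qid' keys; the real helper may differ.
def find_question(schema, qid):
    for page in schema.get('pages', []):
        for question in page.get('questions', []):
            if question.get('qid') == qid:
                return question
    return None
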